repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
datastax-demos/Muvr-Analytics | ipython-analysis/exercise-mlp.ipynb | bsd-3-clause | %matplotlib inline
import shutil
import numpy as np
from os import remove
import cPickle as pkl
from os.path import expanduser, exists
import os
import sys
import logging
logging.basicConfig(level=10)
logger = logging.getLogger()
# Add the mlp python src directory to the import search path
mlp_folder = "../mlp"
sys.path.append(os.path.abspath(mlp_folder))
# Now we are able to use all the packages defined in mlp_folder
# import training.acceleration_dataset
"""
Explanation: MLP Experiments on muvr data
First we need to setup the environment and import all the necessary stuff.
End of explanation
"""
from training.acceleration_dataset import AccelerationDataset
dataset = AccelerationDataset('datasets/combined/18-09-15-triceps-biceps-lateral.zip')
print "Number of training examples:", dataset.num_train_examples
print "Number of test examples:", dataset.num_test_examples
print "Number of features:", dataset.num_features
print "Number of labels:", dataset.num_labels
"""
Explanation: This time we are not going to generate the data but rather use real world annotated training examples.
End of explanation
"""
from ipy_table import *
from operator import itemgetter
def calculate_dist(labels):
    """Count how many examples fall into each label class.

    Returns a (dataset.num_labels, 1) column vector whose row k holds the
    number of entries in ``labels`` equal to k.
    """
    counts = np.zeros((dataset.num_labels, 1))
    for label in labels:
        counts[label, 0] += 1
    return counts
train_dist = calculate_dist(dataset.y_train)
test_dist = calculate_dist(dataset.y_test)
train_ratio = train_dist / dataset.num_train_examples
test_ratio = test_dist / dataset.num_test_examples
# Fiddle around to get it into table shape
table = np.hstack((np.zeros((dataset.num_labels,1), dtype=int), train_dist, train_ratio, test_dist, test_ratio))
table = np.vstack((np.zeros((1, 5), dtype=int), table)).tolist()
human_labels = map(dataset.human_label_for, range(0,dataset.num_labels))
for i,s in enumerate(human_labels):
table[i + 1][0] = s
table.sort(lambda x,y: cmp(x[1], y[1]))
table[0][0] = ""
table[0][1] = "Train"
table[0][2] = "Train %"
table[0][3] = "Test"
table[0][4] = "Test %"
make_table(table)
set_global_style(float_format='%0.0f', align="center")
set_column_style(2, float_format='%0.2f%%')
set_column_style(4, float_format='%0.2f%%')
set_column_style(0, align="left")
"""
Explanation: At first we want to inspect the class distribution of the training and test examples.
End of explanation
"""
from matplotlib import pyplot, cm
from pylab import *
# Choose some random examples to plot from the training data
number_of_examples_to_plot = 3
plot_ids = np.random.random_integers(0, dataset.num_train_examples - 1, number_of_examples_to_plot)
print "Ids of plotted examples:",plot_ids
# Retrieve a human readable label given the idx of an example
def label_of_example(i):
    """Return the human-readable label of training example ``i``."""
    numeric_label = dataset.y_train[i]
    return dataset.human_label_for(numeric_label)
figure(figsize=(20,10))
ax1 = subplot(311)
setp(ax1.get_xticklabels(), visible=False)
ax1.set_ylabel('X - Acceleration')
ax2 = subplot(312, sharex=ax1)
setp(ax2.get_xticklabels(), visible=False)
ax2.set_ylabel('Y - Acceleration')
ax3 = subplot(313, sharex=ax1)
ax3.set_ylabel('Z - Acceleration')
for i in plot_ids:
c = np.random.random((3,))
ax1.plot(range(0, dataset.num_features / 3), dataset.X_train[i,0:400], '-o', c=c)
ax2.plot(range(0, dataset.num_features / 3), dataset.X_train[i,400:800], '-o', c=c)
ax3.plot(range(0, dataset.num_features / 3), dataset.X_train[i,800:1200], '-o', c=c)
legend(map(label_of_example, plot_ids))
suptitle('Feature values for the first three training examples', fontsize=16)
xlabel('Time')
show()
"""
Explanation: Let's have a look at the generated data. We will plot some of the examples of the different classes.
End of explanation
"""
from training.mlp_model import MLPMeasurementModel
from neon.layers import Affine, Dropout, GeneralizedCost
from neon.transforms import Rectlin, Logistic
from neon.initializers import Uniform, Constant
from neon.models import Model
# Lets define our neural network
init_norm = Uniform(low=-0.1,high=0.1)
bias_init = Constant(val = 1.0)
layers = []
layers.append(Affine(
nout=250,
init=init_norm,
bias=bias_init,
activation=Rectlin()))
layers.append(Dropout(
name="do_2",
keep = 0.9))
layers.append(Affine(
nout=100,
init=init_norm,
bias=bias_init,
activation=Rectlin()))
layers.append(Dropout(
name="do_3",
keep = 0.9))
layers.append(Affine(
nout = dataset.num_labels,
init=init_norm,
bias=bias_init,
activation=Logistic()))
mlpmodel = MLPMeasurementModel('models')
model = mlpmodel.train(dataset)
"""
Explanation: Now we are going to create a neon model. We will start with a really simple one-layer perceptron having 500 hidden units.
End of explanation
"""
import numpy as np
import math
from matplotlib import pyplot, cm
from pylab import *
from IPython.html import widgets
from IPython.html.widgets import interact
layer_names = map(lambda l: l[1].name+"_"+str(l[0]), filter(lambda l: l[1].has_params, enumerate(model.layers)))
def closestSqrt(i):
    """Factor ``i`` into (N, M) with N * M == i and N <= M.

    N is the largest divisor of ``i`` not exceeding sqrt(i), so the pair of
    factors is as close to a square as possible (used to tile weight vectors
    into near-square images).

    Raises ValueError for i < 1; the original crashed with a
    ZeroDivisionError at i == 0.
    """
    if i < 1:
        raise ValueError("i must be a positive integer")
    N = int(math.sqrt(i))
    while N > 0:
        # Use an exact divisibility test (the original compared N * int(i/N)
        # against i, which relied on truncating float division).
        if i % N == 0:
            return N, i // N
        N -= 1
def plot_filters(**kwargs):
    """Visualize the weight matrix of one layer at a given training epoch.

    Keyword args:
        n: epoch number; selects the pickled snapshot models/workout-mlp-ep_<n>.
        layer: a layer name from the global ``layer_names`` list.

    Each hidden unit's weight vector is reshaped into a near-square tile and
    all tiles are assembled into one grayscale image.
    """
    n = kwargs['n']
    layer_name = kwargs['layer']
    dest_path = os.path.join('models', 'workout-mlp-ep_' + str(n))
    # Fix: open the pickle in binary mode and close the handle deterministically
    # (the original used text mode 'r' and leaked the open file object).
    with open(dest_path, 'rb') as snapshot:
        params = pkl.load(snapshot)
    wts = params['layer_params_states'][layer_names.index(layer_name)]['params']
    nrows, ncols = closestSqrt(wts.shape[0])
    fr, fc = closestSqrt(wts.shape[1])
    W = np.zeros((fr * nrows, fc * ncols))
    fi = 0
    for row in range(nrows):
        for col in range(ncols):
            # Place tile ``fi`` (one hidden unit's weights) into its grid cell.
            W[fr * row:fr * (row + 1), fc * col:fc * (col + 1)] = wts[fi].reshape(fr, fc)
            fi += 1
    matshow(W, cmap=cm.gray)
    title('Visualizing weights of ' + layer_name + ' in epoch ' + str(n))
    show()
_i = interact(plot_filters,
layer=widgets.widget_selection.ToggleButtons(options = layer_names),
n=widgets.IntSlider(description='epochs',
min=0, max=mlpmodel.max_epochs-1, value=0, step=1))
"""
Explanation: To check whether the network is learning something, we will plot the weight matrices of the different training epochs.
End of explanation
"""
import h5py
from neon.transforms import Misclassification
logged = h5py.File(os.path.join('models', 'workout-mlp.h5'), "r")
epochs = logged['config'].attrs['total_epochs']
minibatches = logged['config'].attrs['total_minibatches']
print('Test error = %.1f%%'
% (model.eval(dataset.test(), metric=Misclassification())*100))
print('Train error = %.1f%%'
% (model.eval(dataset.train(), metric=Misclassification())*100))
print "Minibatches per epoch: %0.2f" % (minibatches * 1.0/epochs)
pyplot.plot(range(0, minibatches), logged['cost/train'], linewidth=3, label='train')
pyplot.plot(range(minibatches / epochs, minibatches, minibatches / epochs), logged['cost/validation'], linewidth=3, label='test')
pyplot.grid()
pyplot.legend()
pyplot.xlabel("minibatch")
pyplot.ylabel("cost")
pyplot.show()
"""
Explanation: Let's visualize the error to see if the network has trained properly or if we are overfitting.
End of explanation
"""
from sklearn.metrics import confusion_matrix
from ipy_table import *
def predict(model, dataset):
    """Run ``model`` over every minibatch of ``dataset`` and collect outputs.

    Returns a float32 array of class scores with one column per example.
    Removed a dead ``running_error`` local and a duplicate ``nprocessed``
    initialization from the original.

    NOTE(review): the accumulator is hard-coded to 3 rows; presumably
    dataset.num_labels == 3 in this notebook — confirm before reuse.
    """
    dataset.reset()
    predictions = np.empty((3, 0), dtype="float32")
    nprocessed = 0
    for x, t in dataset:
        pred = model.fprop(x, inference=True).asnumpyarray()
        # The last minibatch may be padded up to the backend batch size;
        # keep only the columns that correspond to real examples.
        bsz = min(dataset.ndata - nprocessed, model.be.bsz)
        nprocessed += bsz
        predictions = np.hstack((predictions, pred[:, :bsz]))
    return predictions
# confusion_matrix(y_true, y_pred)
predicted = predict(model, dataset.test())
y_true = dataset.y_test
y_pred = np.argmax(predicted, axis = 0)
confusion_mat = confusion_matrix(y_true, y_pred, range(0,dataset.num_labels))
# Fiddle around with cm to get it into table shape
confusion_mat = vstack((np.zeros((1,dataset.num_labels), dtype=int), confusion_mat))
confusion_mat = hstack((np.zeros((dataset.num_labels + 1, 1), dtype=int), confusion_mat))
table = confusion_mat.tolist()
human_labels = map(dataset.human_label_for, range(0,dataset.num_labels))
for i,s in enumerate(human_labels):
table[0][i+1] = s
table[i+1][0] = s
table[0][0] = "actual \ predicted"
mt = make_table(table)
set_row_style(0, color='lightGray', rotate = "315deg")
set_column_style(0, color='lightGray')
set_global_style(align='center')
for i in range(1, dataset.num_labels + 1):
for j in range(1, dataset.num_labels + 1):
if i == j:
set_cell_style(i,j, color='lightGreen', width = 80)
elif table[i][j] > 20:
set_cell_style(i,j, color='Pink')
elif table[i][j] > 0:
set_cell_style(i,j, color='lightYellow')
mt
"""
Explanation: Let's also have a look at the confusion matrix for the test dataset.
End of explanation
"""
|
karlstroetmann/Formal-Languages | Python/Test-DFA-2-RegExp.ipynb | gpl-2.0 | %run DFA-2-RegExp.ipynb
%run FSM-2-Dot.ipynb
delta = { (0, 'a'): 0,
(0, 'b'): 1,
(1, 'a'): 1
}
A = {0, 1}, {'a', 'b'}, delta, 0, {1}
g, _ = dfa2dot(A)
g
r = dfa_2_regexp(A)
r
"""
Explanation: Test DFA-2-RegExp
End of explanation
"""
%run Rewrite.ipynb
s = simplify(r, Rules)
s
"""
Explanation: As this regular expression is nearly unreadable, The notebook Rewrite.ipynb contains the definition of the function simplify that can be used to simplify this expression.
End of explanation
"""
def regexp_2_string(r):
if r == 0:
return '0'
if r == '': # epsilon
return '""'
if isinstance(r, str): # single characters
return r
if r[0] == '&': # concatenation
r1, r2 = r[1:]
return regexp_2_string(r1) + regexp_2_string(r2)
if r[0] == '+':
r1, r2 = r[1:]
return '(' + regexp_2_string(r1) + '+' + regexp_2_string(r2) + ')'
if r[0] == '*':
r1 = r[1]
if isinstance(r1, str):
return regexp_2_string(r1) +'*'
else:
return '(' + regexp_2_string(r1) + ')*'
raise Exception(f'{r} is not a suitable regular expression')
print(regexp_2_string(s))
"""
Explanation: The function regexp_2_string takes a regular expression that is represented as a nested tuple and transforms it into a string.
End of explanation
"""
|
RomanSC/python-problems | notebooks/.ipynb_checkpoints/Coin Problem Take Two-checkpoint.ipynb | gpl-3.0 | import sys
sys.path.append('../')
from coins import *
"""
Explanation: <h1>Coin Problem:</h1>
<strong>The puzzle:</strong>
<i>You place 100 coins heads up in a row and number them by position, with the coin all the way on the left No. 1 and the one on the rightmost edge No. 100. Next, for every number N, from 1 to 100, you flip over every coin whose position is a multiple of N. For example, first you'll flip over all the coins, because every number is a multiple of 1. Then you'll flip over all the even-numbered coins, because they're multiples of 2. Then you'll flip coins No. 3, 6, 9, 12, and so on.</i>
<i>What do the coins look like when you're done? Specifically, which coins are heads down?</i>
<strong>Answer:</strong>
If you begin with all coins heads facing up all of the coins will end up flipped heads down. They should end up being flipped on the opposite side that they were placed on to begin with before flipping.
<strong>Discussion:</strong>
I chose this puzzle out of 7 because it looked interesting from both a mathematical and programming angle and also easily implementable. If I had not solved this puzzle using an algorithm I would have spent most of my time tediously flipping coins and hopefully keeping track accurately in order to end up with the right results. This made it a perfect problem to apply from a computer science perspective, afterall all software which are algorithms serve the purpose of taking large tasks and solving them quickly doing most of the work for the user.
Not only did this puzzle have an interesting programming angle, while solving the problem which took two attempts on my part I found myself wondering what the mathematical basis for this problem is. My algorithm asks for each coin and step (which we can let be N), "Is the coin's number divisible by N such that it becomes equal to 0?" If yes, we flip the coin. This is a different question than "Is the coin's number a multiple of N?", which implicates a couple of things. One, my algorithm wouldn't be able to work with negative numbers, more on that in a second. Two, also that it wouldn't be able to work with decimals or non-whole numbers, because it counts each step using whole numbers. Finally and most fascinating, this puzzle seems to demonstrate the properties of multiplicative inverse. This was revealed to me in my first attempt when I was printing out each flip (step) to the terminal as I was coding the algorithm. I hope to demonstrate this with a picture at the end of my notes.
<h3>Algorithm:</h3>
First import coins.py, using sys to traverse up a directory though...
End of explanation
"""
coins = gen_coins(heads='+')
print(coins)
"""
Explanation: We start with coins facing heads up:
End of explanation
"""
flipped_coins = flip_coins(coins, tails='-')
print(flipped_coins)
"""
Explanation: Then we use the algorithm to do the flipping for us:
End of explanation
"""
def gen_coins(heads='.', n=100):
    """Return a row of ``n`` coins, all showing the ``heads`` character.

    The original looped n times rebuilding the identical string and raised
    a NameError for n == 0; string repetition does it directly and returns
    '' for n == 0.
    """
    return heads * n
def flip_coins(coins, tails='·'):
    # Simulate the coin-flipping procedure: print and return the final row
    # of coins as a string.
    #
    # NOTE(review): this does not toggle coins back and forth; it only ever
    # assigns ``tails`` to a position once some step number divides it.
    # Since step n == 0 satisfies 0 % i == 0 for every i >= 1, and position 0
    # is forced explicitly, every coin ends up showing ``tails`` — confirm
    # this matches the intended multiple-of-N flipping in the puzzle text.
    flipped_coins = []
    # Copy the input string into a mutable list of single characters.
    for c in range(len(coins)):
        flipped_coins.append(coins[c])
    for i in range(len(flipped_coins)):
        for n in range(len(flipped_coins)):
            if i == 0:
                # Position 0 has no well-defined divisibility test; force it.
                flipped_coins[i] = tails
            try:
                if n % i == 0:
                    flipped_coins[i] = tails
                #print((n % i), end='')
            except ZeroDivisionError:
                # i == 0 divides by zero here; already handled above.
                pass
    print(''.join(flipped_coins))
    return ''.join(flipped_coins)
def main():
    """Demo driver: lay out 100 coins heads up ('-') and flip them ('+')."""
    row = gen_coins(heads='-', n=100)
    flip_coins(row, tails='+')


if __name__ == '__main__':
    main()
"""
Explanation: <pre>
<!-- HTML generated using hilite.me --><div style="background: #f0f0f0; overflow:auto;width:auto;border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;"><pre style="margin: 0; line-height: 125%"><span style="color: #4070a0; font-style: italic">""" coins.py | Tue, Feb 07, 2017 | Roman S. Collins</span>
<span style="color: #4070a0; font-style: italic"> The problem:</span>
<span style="color: #4070a0; font-style: italic"> You place 100 coins heads up in a row and number them by position, with the coin all the way on the left No. 1 and the one on the rightmost edge</span>
<span style="color: #4070a0; font-style: italic"> No. 100. Next, for every number N, from 1 to 100, you flip over every coin whose position is a multiple of N. For example, first you'll flip over</span>
<span style="color: #4070a0; font-style: italic"> every coin whose position is a multiple of N. For example. First you'll flip over all the coins, because every number is a multiple of 1. Then you'll</span>
<span style="color: #4070a0; font-style: italic"> flip over all the even-numbered coins, because theyre multiples of 2. Then you'll flip coins No. 3, 6, 9, 12 and so on.</span>
<span style="color: #4070a0; font-style: italic">"""</span>
<span style="color: #007020; font-weight: bold">def</span> <span style="color: #06287e">gen_coins</span>(heads<span style="color: #666666">=</span><span style="color: #4070a0">'.'</span>, n<span style="color: #666666">=</span><span style="color: #40a070">100</span>):
<span style="color: #007020; font-weight: bold">for</span> i <span style="color: #007020; font-weight: bold">in</span> <span style="color: #007020">range</span>(<span style="color: #40a070">1</span>, n <span style="color: #666666">+</span> <span style="color: #40a070">1</span>):
coins <span style="color: #666666">=</span> heads <span style="color: #666666">*</span> n
<span style="color: #007020; font-weight: bold">return</span> coins
<span style="color: #007020; font-weight: bold">def</span> <span style="color: #06287e">flip_coins</span>(coins, tails<span style="color: #666666">=</span><span style="color: #4070a0">'·'</span>):
flipped_coins <span style="color: #666666">=</span> []
<span style="color: #007020; font-weight: bold">for</span> c <span style="color: #007020; font-weight: bold">in</span> <span style="color: #007020">range</span>(<span style="color: #007020">len</span>(coins)):
flipped_coins<span style="color: #666666">.</span>append(coins[c])
<span style="color: #007020; font-weight: bold">for</span> i <span style="color: #007020; font-weight: bold">in</span> <span style="color: #007020">range</span>(<span style="color: #007020">len</span>(flipped_coins)):
<span style="color: #007020; font-weight: bold">for</span> n <span style="color: #007020; font-weight: bold">in</span> <span style="color: #007020">range</span>(<span style="color: #007020">len</span>(flipped_coins)):
<span style="color: #007020; font-weight: bold">if</span> i <span style="color: #666666">==</span> <span style="color: #40a070">0</span>:
flipped_coins[i] <span style="color: #666666">=</span> tails
<span style="color: #007020; font-weight: bold">try</span>:
<span style="color: #007020; font-weight: bold">if</span> n <span style="color: #666666">%</span> i <span style="color: #666666">==</span> <span style="color: #40a070">0</span>:
flipped_coins[i] <span style="color: #666666">=</span> tails
<span style="color: #60a0b0; font-style: italic">#print((n % i), end='')</span>
<span style="color: #007020; font-weight: bold">except</span> <span style="color: #007020">ZeroDivisionError</span>:
<span style="color: #007020; font-weight: bold">pass</span>
<span style="color: #007020; font-weight: bold">return</span> <span style="color: #4070a0">''</span><span style="color: #666666">.</span>join(flipped_coins)
<span style="color: #007020; font-weight: bold">def</span> <span style="color: #06287e">main</span>():
coins <span style="color: #666666">=</span> gen_coins(heads<span style="color: #666666">=</span><span style="color: #4070a0">'-'</span>)
<span style="color: #007020; font-weight: bold">print</span>(<span style="color: #4070a0">''</span>)
<span style="color: #007020; font-weight: bold">print</span>(coins)
flipped_coins <span style="color: #666666">=</span> flip_coins(coins, tails<span style="color: #666666">=</span><span style="color: #4070a0">'+'</span>)
<span style="color: #007020; font-weight: bold">print</span>(<span style="color: #4070a0">''</span>)
<span style="color: #007020; font-weight: bold">print</span>(flipped_coins)
<span style="color: #007020; font-weight: bold">if</span> __name__ <span style="color: #666666">==</span> <span style="color: #4070a0">'__main__'</span>:
main()
</pre></div>
</pre>
<h3>Multiplicative Inverse Property:</h3>
(The result is beautiful.)
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive/02_tensorflow/b_tfstart_graph.ipynb | apache-2.0 | import tensorflow as tf
print(tf.__version__)
"""
Explanation: Getting started with TensorFlow (Graph Mode)
Learning Objectives
- Understand the difference between Tensorflow's two modes: Eager Execution and Graph Execution
- Get used to deferred execution paradigm: first define a graph then run it in a tf.Session()
- Understand how to parameterize a graph using tf.placeholder() and feed_dict
- Understand the difference between constant Tensors and variable Tensors, and how to define each
- Practice using mid-level tf.train module for gradient descent
Introduction
Eager Execution
Eager mode evaluates operations immediately and returns concrete values right away. To enable eager mode, simply place tf.enable_eager_execution() at the top of your code. We recommend using eager execution when prototyping as it is intuitive, easier to debug, and requires less boilerplate code.
Graph Execution
Graph mode is TensorFlow's default execution mode (although it will change to eager in TF 2.0). In graph mode operations only produce a symbolic graph which doesn't get executed until run within the context of a tf.Session(). This style of coding is less intuitive and has more boilerplate; however, it can lead to performance optimizations and is particularly suited for distributing training across multiple devices. We recommend using deferred execution for performance-sensitive production code.
End of explanation
"""
a = tf.constant(value = [5, 3, 8], dtype = tf.int32)
b = tf.constant(value = [3, -1, 2], dtype = tf.int32)
c = tf.add(x = a, y = b)
print(c)
"""
Explanation: Graph Execution
Adding Two Tensors
Build the Graph
Unlike eager mode, no concrete value will be returned yet. Just a name, shape and type are printed. Behind the scenes a directed graph is being created.
End of explanation
"""
with tf.Session() as sess:
result = sess.run(fetches = c)
print(result)
"""
Explanation: Run the Graph
A graph can be executed in the context of a tf.Session(). Think of a session as the bridge between the front-end Python API and the back-end C++ execution engine.
Within a session, passing a tensor operation to run() will cause Tensorflow to execute all upstream operations in the graph required to calculate that value.
End of explanation
"""
a = tf.placeholder(dtype = tf.int32, shape = [None])
b = tf.placeholder(dtype = tf.int32, shape = [None])
c = tf.add(x = a, y = b)
with tf.Session() as sess:
result = sess.run(fetches = c, feed_dict = {
a: [3, 4, 5],
b: [-1, 2, 3]
})
print(result)
"""
Explanation: Parameterizing the Graph
What if values of a and b keep changing? How can you parameterize them so they can be fed in at runtime?
Step 1: Define Placeholders
Define a and b using tf.placeholder(). You'll need to specify the data type of the placeholder, and optionally a tensor shape.
Step 2: Provide feed_dict
Now when invoking run() within the tf.Session(), in addition to providing a tensor operation to evaluate, you also provide a dictionary whose keys are the names of the placeholders.
End of explanation
"""
X = tf.constant(value = [1,2,3,4,5,6,7,8,9,10], dtype = tf.float32)
Y = 2 * X + 10
print("X:{}".format(X))
print("Y:{}".format(Y))
"""
Explanation: Linear Regression
Toy Dataset
We'll model the following:
\begin{equation}
y= 2x + 10
\end{equation}
End of explanation
"""
with tf.variable_scope(name_or_scope = "training", reuse = tf.AUTO_REUSE):
w0 = tf.get_variable(name = "w0", initializer = tf.constant(value = 0.0, dtype = tf.float32))
w1 = tf.get_variable(name = "w1", initializer = tf.constant(value = 0.0, dtype = tf.float32))
Y_hat = w0 * X + w1
loss_mse = tf.reduce_mean(input_tensor = (Y_hat - Y)**2)
"""
Explanation: 2.2 Loss Function
Using mean squared error, our loss function is:
\begin{equation}
MSE = \frac{1}{m}\sum_{i=1}^{m}(\hat{Y}_i-Y_i)^2
\end{equation}
$\hat{Y}$ represents the vector containing our model's predictions:
\begin{equation}
\hat{Y} = w_0X + w_1
\end{equation}
Note below we introduce TF variables for the first time. Unlike constants, variables are mutable.
Browse the official TensorFlow guide on variables for more information on when/how to use them.
End of explanation
"""
LEARNING_RATE = tf.placeholder(dtype = tf.float32, shape = None)
optimizer = tf.train.GradientDescentOptimizer(learning_rate = LEARNING_RATE).minimize(loss = loss_mse)
"""
Explanation: Optimizer
An optimizer in TensorFlow both calculates gradients and updates weights. In addition to basic gradient descent, TF provides implementations of several more advanced optimizers such as ADAM and FTRL. They can all be found in the tf.train module.
Note below we're not expclictly telling the optimizer which tensors are our weight tensors. So how does it know what to update? Optimizers will update all variables in the tf.GraphKeys.TRAINABLE_VARIABLES collection. All variables are added to this collection by default. Since our only variables are w0 and w1, this is the behavior we want. If we had a variable that we didn't want to be added to the collection we would set trainable=false when creating it.
End of explanation
"""
STEPS = 1000
with tf.Session() as sess:
sess.run(tf.global_variables_initializer()) # initialize variables
for step in range(STEPS):
#1. Calculate gradients and update weights
sess.run(fetches = optimizer, feed_dict = {LEARNING_RATE: 0.02})
#2. Periodically print MSE
if step % 100 == 0:
print("STEP: {} MSE: {}".format(step, sess.run(fetches = loss_mse)))
# Print final MSE and weights
print("STEP: {} MSE: {}".format(STEPS, sess.run(loss_mse)))
print("w0:{}".format(round(float(sess.run(w0)), 4)))
print("w1:{}".format(round(float(sess.run(w1)), 4)))
"""
Explanation: Training Loop
Note our results are identical to what we found in Eager mode.
End of explanation
"""
|
metpy/MetPy | v0.12/_downloads/6535033cff935ab2c434cdad6eb5b4f7/Wind_SLP_Interpolation.ipynb | bsd-3-clause | import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from metpy.calc import wind_components
from metpy.cbook import get_test_data
from metpy.interpolate import interpolate_to_grid, remove_nan_observations
from metpy.plots import add_metpy_logo
from metpy.units import units
to_proj = ccrs.AlbersEqualArea(central_longitude=-97., central_latitude=38.)
"""
Explanation: Wind and Sea Level Pressure Interpolation
Interpolate sea level pressure, as well as wind component data,
to make a consistent looking analysis, featuring contours of pressure and wind barbs.
End of explanation
"""
with get_test_data('station_data.txt') as f:
data = pd.read_csv(f, header=0, usecols=(2, 3, 4, 5, 18, 19),
names=['latitude', 'longitude', 'slp', 'temperature', 'wind_dir',
'wind_speed'],
na_values=-99999)
"""
Explanation: Read in data
End of explanation
"""
lon = data['longitude'].values
lat = data['latitude'].values
xp, yp, _ = to_proj.transform_points(ccrs.Geodetic(), lon, lat).T
"""
Explanation: Project the lon/lat locations to our final projection
End of explanation
"""
x_masked, y_masked, pres = remove_nan_observations(xp, yp, data['slp'].values)
"""
Explanation: Remove all missing data from pressure
End of explanation
"""
slpgridx, slpgridy, slp = interpolate_to_grid(x_masked, y_masked, pres, interp_type='cressman',
minimum_neighbors=1, search_radius=400000,
hres=100000)
"""
Explanation: Interpolate pressure using Cressman interpolation
End of explanation
"""
wind_speed = (data['wind_speed'].values * units('m/s')).to('knots')
wind_dir = data['wind_dir'].values * units.degree
good_indices = np.where((~np.isnan(wind_dir)) & (~np.isnan(wind_speed)))
x_masked = xp[good_indices]
y_masked = yp[good_indices]
wind_speed = wind_speed[good_indices]
wind_dir = wind_dir[good_indices]
"""
Explanation: Get wind information and mask where either speed or direction is unavailable
End of explanation
"""
u, v = wind_components(wind_speed, wind_dir)
windgridx, windgridy, uwind = interpolate_to_grid(x_masked, y_masked, np.array(u),
interp_type='cressman', search_radius=400000,
hres=100000)
_, _, vwind = interpolate_to_grid(x_masked, y_masked, np.array(v), interp_type='cressman',
search_radius=400000, hres=100000)
"""
Explanation: Calculate u and v components of wind and then interpolate both.
Both will have the same underlying grid so throw away grid returned from v interpolation.
End of explanation
"""
x_masked, y_masked, t = remove_nan_observations(xp, yp, data['temperature'].values)
tempx, tempy, temp = interpolate_to_grid(x_masked, y_masked, t, interp_type='cressman',
minimum_neighbors=3, search_radius=400000, hres=35000)
temp = np.ma.masked_where(np.isnan(temp), temp)
"""
Explanation: Get temperature information
End of explanation
"""
levels = list(range(-20, 20, 1))
cmap = plt.get_cmap('viridis')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
fig = plt.figure(figsize=(20, 10))
add_metpy_logo(fig, 360, 120, size='large')
view = fig.add_subplot(1, 1, 1, projection=to_proj)
view.set_extent([-120, -70, 20, 50])
view.add_feature(cfeature.STATES.with_scale('50m'))
view.add_feature(cfeature.OCEAN)
view.add_feature(cfeature.COASTLINE.with_scale('50m'))
view.add_feature(cfeature.BORDERS, linestyle=':')
cs = view.contour(slpgridx, slpgridy, slp, colors='k', levels=list(range(990, 1034, 4)))
view.clabel(cs, inline=1, fontsize=12, fmt='%i')
mmb = view.pcolormesh(tempx, tempy, temp, cmap=cmap, norm=norm)
fig.colorbar(mmb, shrink=.4, pad=0.02, boundaries=levels)
view.barbs(windgridx, windgridy, uwind, vwind, alpha=.4, length=5)
view.set_title('Surface Temperature (shaded), SLP, and Wind.')
plt.show()
"""
Explanation: Set up the map and plot the interpolated grids appropriately.
End of explanation
"""
|
eds-uga/csci1360-fa16 | assignments/A6/A6_Q1.ipynb | mit | truth = "This is some text.\nMore text, but on a different line!\nInsert your favorite meme here.\n"
pred = read_file_contents("q1data/file1.txt")
assert truth == pred
retval = -1
try:
retval = read_file_contents("nonexistent/path.txt")
except:
assert False
else:
assert retval is None
"""
Explanation: Q1
In this question, we'll review the basics of file I/O (file input/output) and the various function calls and modes required (this will draw on material from L14).
A
Write a function read_file_contents which takes a string pathname as an argument, and returns a single string that contains all the contents of the file. Don't import any additional packages.
If I have a file random_text.txt, I'll give the full path to this file to the function: contents = read_file_contents("random_text.txt"), and I should get back a single string contents that contains all the contents of the file.
NOTE: Your function should be able to handle errors gracefully! If an error occurs when trying to read from the file, your function should return None (note the capitalization of the first letter).
End of explanation
"""
truth = "Yo dawg, I heard yo and yo dawg like yo-yos.\nSo we put yo dawg in a yo-yo.\nSo yo can yo-yo yo dawg while yo dawg yo-yos, dawg.\nMaximum ridiculousness reached.\n"
pred = read_file("q1data/file2.txt")
assert truth == pred
truth = ['Yo dawg, I heard yo and yo dawg like yo-yos.\n',
'So we put yo dawg in a yo-yo.\n',
'So yo can yo-yo yo dawg while yo dawg yo-yos, dawg.\n',
'Maximum ridiculousness reached.\n']
pred = read_file("q1data/file2.txt", as_list = True)
for item in truth:
assert item in pred
for item in pred:
assert item in truth
retval = -1
try:
retval = read_file("another/nonexistent/path.txt")
except:
assert False
else:
assert retval is None
"""
Explanation: B
This time, write a function read_file that takes two arguments: the first is the path to the file (same as before), and the second is an optional boolean argument as_list that defaults to False. When this flag is False (the default), your function should behave identically to read_file_contents. In fact, if as_list is False, you can just call your previous function.
If as_list is True, instead of returning a single string of the file's contents, return a list of strings, where each item in the list is a line from the file.
NOTE: Your function should be able to handle errors gracefully! If an error occurs when trying to read from the file, your function should return None (note the capitalization of the first letter).
End of explanation
"""
import os.path
# --- Tests for count_lines (Part C) ---
# Happy path: output file is created and holds the line count of file1 (3 lines).
assert count_lines("q1data/file1.txt", "q1data/file1_out.txt")
assert os.path.exists("q1data/file1_out.txt")
assert int(open("q1data/file1_out.txt", "r").read()) == 3

# An unreadable input path must yield a falsy return value, not an exception.
r1 = None
try:
    r1 = count_lines("yet/another/nonexistent/path.txt", "meaningless")
except:
    assert False
else:
    assert not r1

# An unwritable output path must yield a falsy return value, not an exception.
r2 = None
try:
    r2 = count_lines("q1data/file1.txt", "/this/should/throw/an/error.txt")
except:
    assert False
else:
    assert not r2
"""
Explanation: C
In this question, you'll read from one file, perform a simple computation, and write the results to a new file.
Write a function count_lines that takes two arguments: the first is a path to a file to read, the second is the path to an output file. Your function will count the number of lines in the file at the first argument, and write this number to a file at the second argument.
Your function should return True on success, and False if an error occurred.
NOTE: Your function should be able to handle errors gracefully! If an error occurs when trying to read from the file or write to the output file, your function should return False.
End of explanation
"""
# --- Tests for acount_lines (Part D) ---
# Start from a clean slate so the append behaviour is deterministic.
if os.path.exists("q1data/out_again.txt"):
    os.remove("q1data/out_again.txt")

# First call creates the file with a single count (file1 has 3 lines).
assert acount_lines("q1data/file1.txt", "q1data/out_again.txt")
assert os.path.exists("q1data/out_again.txt")
assert int(open("q1data/out_again.txt", "r").read()) == 3

# Second call appends; joining the digits of both counts should read "34"
# (3 lines from file1 followed by 4 lines from file2).
assert acount_lines("q1data/file2.txt", "q1data/out_again.txt")
assert os.path.exists("q1data/out_again.txt")
assert int("".join(open("q1data/out_again.txt", "r").read().split("\n"))) == 34

# Unreadable input must yield a falsy return value, not an exception.
r1 = None
try:
    r1 = acount_lines("yet/another/nonexistent/path.txt", "meaningless")
except:
    assert False
else:
    assert not r1

# Unwritable output must yield a falsy return value, not an exception.
r2 = None
try:
    r2 = acount_lines("q1data/file2.txt", "/this/should/throw/an/error.txt")
except:
    assert False
else:
    assert not r2
"""
Explanation: D
In this question, you'll write a function acount_lines that performs the same operation as before, except in the case that the output file already exists: in this case, you'll append the line count to the file instead of overwriting it, thus preserving any existing previous line counts.
Each new appended line count should be on its own line in the output file. You may need to manually insert newline characters, which are a backslash followed by the letter n: \n
Your function should return True on success, and False if an error occurred.
NOTE: Your function should be able to handle errors gracefully! If an error occurs when trying to read from the file or write to the output file, your function should return False.
End of explanation
"""
|
chunweixu/Deep-Learning | tv-script-generation/.ipynb_checkpoints/dlnd_tv_script_generation-checkpoint.ipynb | mit | """
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words (list of word strings)
    :return: A tuple of dicts (vocab_to_int, int_to_vocab), inverse mappings
             between each unique word and a unique integer id
    """
    # Sort so the id assignment is deterministic across runs.
    vocab = sorted(set(text))
    vocab_to_int = {word: word_id for word_id, word in enumerate(vocab)}
    int_to_vocab = {word_id: word for word, word_id in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.

    Tokens contain no spaces and cannot be confused with ordinary words, so the
    tokenized text can safely be split on whitespace.

    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # Batch size and sequence length are left dynamic (None) so the same
    # graph serves both training and single-step text generation.
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    num_layers = 2  # number of stacked LSTM layers
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
    # Name the zero state so it can be fetched by name after reloading the graph.
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32),
                                name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The Rnn size should be set using rnn_size
- Initalize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # One trainable row per vocabulary word, initialized uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell.

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be fetched by name after reloading the graph.
    final_state = tf.identity(state, name='final_state')
    return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network: embedding -> RNN -> linear output layer.

    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    embed = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embed)
    # Linear activation (activation_fn=None): raw logits over the vocabulary,
    # softmax is applied later when computing probabilities.
    logits = tf.contrib.layers.fully_connected(outputs, vocab_size,
                                               activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target.

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Numpy array of shape (number of batches, 2, batch_size, seq_length);
             index 1 along axis 1 holds the targets (inputs shifted by one word)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch

    # Drop the tail that does not fill a whole batch.
    inputs = np.array(int_text[:n_batches * words_per_batch])
    # Targets are the inputs shifted by one; np.roll makes the very last
    # target wrap around to the first input word (standard convention).
    targets = np.roll(inputs, -1)

    inputs = inputs.reshape(batch_size, -1)
    targets = targets.reshape(batch_size, -1)

    batches = np.zeros((n_batches, 2, batch_size, seq_length), dtype=inputs.dtype)
    for batch_i in range(n_batches):
        start = batch_i * seq_length
        end = start + seq_length
        batches[batch_i, 0] = inputs[:, start:end]
        batches[batch_i, 1] = targets[:, start:end]
    return batches
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2], [ 7 8], [13 14]]
# Batch of targets
[[ 2 3], [ 8 9], [14 15]]
]
# Second Batch
[
# Batch of Input
[[ 3 4], [ 9 10], [15 16]]
# Batch of targets
[[ 4 5], [10 11], [16 17]]
]
# Third Batch
[
# Batch of Input
[[ 5 6], [11 12], [17 18]]
# Batch of targets
[[ 6 7], [12 13], [18 1]]
]
]
```
Notice that the last target value in the last batch is the first input value of the first batch. In this case, 1. This is a common technique used when creating sequence batches, although it is rather unintuitive.
End of explanation
"""
# Number of Epochs
num_epochs = None
# Batch Size
batch_size = None
# RNN Size
rnn_size = None
# Embedding Dimension Size
embed_dim = None
# Sequence Length
seq_length = None
# Learning Rate
learning_rate = None
# Show stats for every n number of batches
show_every_n_batches = None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
    vocab_size = len(int_to_vocab)
    input_text, targets, lr = get_inputs()
    # Batch size / sequence length are read from the input tensor at run time.
    input_data_shape = tf.shape(input_text)
    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)

    # Probabilities for generating words
    probs = tf.nn.softmax(logits, name='probs')

    # Loss function: average cross-entropy over every position, uniform weights.
    cost = seq2seq.sequence_loss(
        logits,
        targets,
        tf.ones([input_data_shape[0], input_data_shape[1]]))

    # Optimizer
    optimizer = tf.train.AdamOptimizer(lr)

    # Gradient Clipping: clip each gradient to [-1, 1] to avoid exploding
    # gradients, a common failure mode when training RNNs.
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
    train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Batches are precomputed once; they do not change across epochs.
batches = get_batches(int_text, batch_size, seq_length)

with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())

    for epoch_i in range(num_epochs):
        # Reset the RNN state at the start of every epoch.
        state = sess.run(initial_state, {input_text: batches[0][0]})

        for batch_i, (x, y) in enumerate(batches):
            feed = {
                input_text: x,
                targets: y,
                initial_state: state,
                lr: learning_rate}
            # Carry the final state over to the next batch (stateful training).
            train_loss, state, _ = sess.run([cost, final_state, train_op], feed)

            # Show every <show_every_n_batches> batches
            if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
                print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    epoch_i,
                    batch_i,
                    len(batches),
                    train_loss))

    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, save_dir)
    print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>.

    The names match those assigned with tf.identity()/name= when the training
    graph was built, so the tensors can be recovered after import_meta_graph.

    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    input_tensor = loaded_graph.get_tensor_by_name('input:0')
    initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')
    final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')
    probs_tensor = loaded_graph.get_tensor_by_name('probs:0')
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text.

    Samples from the distribution instead of taking the argmax, so the
    generated script does not loop on the single most likely word.

    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    word_id = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[word_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)

    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)

    # Sentences generation setup: seed the script with the prime word,
    # and initialize the RNN state with a dummy single-token input.
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})

    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: feed at most the last seq_length generated words.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])

        # Get Prediction; carry the RNN state between steps.
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})

        # assumes probabilities indexes as (position, vocab) here -- TODO confirm
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)

        gen_sentences.append(pred_word)

    # Remove tokens: map punctuation tokens back to their original symbols.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        # NOTE(review): `ending` is computed but never used -- kept as-is
        # since this is a DON'T MODIFY cell.
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')

    print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
HsKA-ThermalFluiddynamics/NSS-1 | 3_1-Numerik_Iterative_Verfahren.ipynb | mit | import matplotlib.pyplot as plt
import numpy as np
import math
%config InlineBackend.figure_format = 'svg'
%matplotlib inline
# Left-hand side of the equation: LHS(lambda) = 1 / sqrt(lambda)
def LHS(lamb):
    return np.sqrt(lamb) ** -1
# Right-hand side of the equation: RHS(lambda, Re) = 2*log10(Re*sqrt(lambda)) - 0.8
def RHS(lamb, Re):
    log_term = np.log10(Re * np.sqrt(lamb))
    return 2.0 * log_term - 0.8
# Array mit äquidistanten Werten für lambda und Festlegen der Re-Zahl:
lamb = np.arange(0.0001, 0.05, 0.0001);
Re = 6.4e6
plt.plot(lamb, LHS(lamb), label="LHS")
plt.plot(lamb, RHS(lamb, Re), label="RHS")
plt.axis([0, 0.02, 5, 15])
plt.ylabel('LHS, RHS')
plt.xlabel('Rohrreibungszahl $\lambda$')
plt.grid()
plt.legend()
plt.show();
"""
Explanation: Iterative Verfahren
Fixpunktiteration
Ein Beispiel aus der Rohrhydraulik:
Zur Bestimmung der Rohrreibungszahl $\lambda$ kann bei glatten, turbulent durchströmten Rohren die implizite Formel von Prandtl verwendet werden:
$$\frac{1}{\sqrt\lambda} = 2\cdot\log\left(\text{Re}\cdot\sqrt\lambda\right) - 0,8$$
Mathe-Nerds lösen die Gleichung mithilfe der Lambertschen W-Funktion, Ingenieure jedoch meist mit einem numerischen Verfahren.
Die Lösung für die Prandtl-Formel lässt sich grafisch als Schnittpunkt der linken und rechten Seite finden.
Wir definieren für die linke und rechte Seite separat:
$$LHS\left(\lambda\right) = \frac{1}{\sqrt\lambda}$$
$$RHS\left(\lambda\right) = 2\cdot\log\left(\text{Re}\cdot\sqrt\lambda\right) - 0,8$$
Für eine Reynoldszahl $\text{Re} = 6,4\cdot 10^6$ ergeben sich dann folgende Kurvenverläufe:
End of explanation
"""
# Initial guess for lambda:
lamb_alt = 100
# List for storing intermediate results
lambda_i = []
lambda_i.append(lamb_alt)

# Fixed-point algorithm: lambda_{i+1} = 1 / RHS(lambda_i)^2
for iteration in range(0, 5):
    lamb_neu = 1 / (RHS(lamb_alt, Re)**2)
    lambda_i.append(lamb_neu)
    lamb_alt = lamb_neu

# Relative residual between the two sides of the equation at the final iterate
fehler = (RHS(lamb_neu, Re)-LHS(lamb_neu)) / RHS(lamb_neu, Re)

plt.plot(lamb, LHS(lamb))
plt.plot(lamb, RHS(lamb, Re))
plt.plot(lambda_i, LHS(lambda_i), 'o')
# Annotate each iterate with its iteration number
for i, txt in enumerate(lambda_i):
    plt.annotate(i, (lambda_i[i], LHS(lambda_i[i])))
plt.axis([0, 0.02, 5, 15])
plt.ylabel('LHS, RHS')
plt.xlabel('Rohrreibungszahl $\lambda$')
plt.show();

print ("lambda = ", lamb_neu, ", Fehler in %: ", fehler*100)
"""
Explanation: Als Lösung ("Fixpunkt"), bei dem linke und rechte Seite denselben Wert annehmen lässt sich für $\lambda$ ein Wert von etwa 0,0085 ablesen.
Mithilfe einer Fixpunktiteration lässt sich dieser auch iterativ bestimmen. Hierzu formen wir die Prandtl-Formel so um, dass auf der linken Seite nur noch $\lambda$ steht:
$$\lambda = \frac{1}{\left[RHS\left(\lambda\right)\right]^2}$$
Damit lässt sich nun eine Iterationsvorschrift formulieren:
$$\lambda_{i+1} = \frac{1}{\left[RHS\left(\lambda_i\right)\right]^2}$$
End of explanation
"""
# Initial guess for lambda:
lamb_alt = 0.01
# List for storing intermediate results
lambda_newton_i = []
lambda_newton_i.append(lamb_alt)
# f(lambda) = LHS - RHS; its root is the solution of the Prandtl equation
def f(lamb, Re):
    lhs = 1 / np.sqrt(lamb)
    rhs = 2.0 * np.log10(Re * np.sqrt(lamb)) - 0.8
    return lhs - rhs
# Analytic derivative f'(lambda) of the root-finding function f
def f_strich(lamb):
    power_term = 0.5 * lamb ** -1.5
    log_term = 1.0 / (lamb * math.log(10))
    return -(power_term + log_term)
# Tangent line to f at the point lambda (visualization only, not needed by Newton)
def tangente_f(x, lamb, Re):
    slope = f_strich(lamb)
    return f(lamb, Re) + slope * (x - lamb)
# Newton's method: lambda_{i+1} = lambda_i - f(lambda_i) / f'(lambda_i)
for iteration in range(0, 15):
    lamb_neu = lamb_alt - f(lamb_alt, Re)/f_strich(lamb_alt)
    lambda_newton_i.append(lamb_neu)
    lamb_alt = lamb_neu

# Relative residual between the two sides of the equation at the final iterate
fehler = (RHS(lamb_neu, Re)-LHS(lamb_neu)) / RHS(lamb_neu, Re)

# Display results in a diagram:
plt.plot(lamb, f(lamb, Re))
plt.plot(lambda_newton_i, f(lambda_newton_i, Re), 'o')
# Annotate each iterate and draw the tangent line used for that Newton step
for i, txt in enumerate(lambda_newton_i):
    plt.annotate(i, (lambda_newton_i[i], f(lambda_newton_i[i], Re)))
    plt.plot(lamb, tangente_f(lamb, lambda_newton_i[i], Re))
plt.plot([0,0.02],[0,0],'k', linewidth=1)
plt.axis([0, 0.02, -4, 6])
plt.ylabel('f = LHS - RHS')
plt.xlabel('Rohrreibungszahl $\lambda$')
plt.show();

print ("lambda = ", lamb_neu, ", Fehler in %: ", fehler*100)
"""
Explanation: Newton-Verfahren
Das Newton-Verfahren ist eine weitere Möglichkeit, um Gleichungen iterativ zu lösen. Hierzu wird die Gleichung so umgestellt, dass sich das Problem in eine Nullstellensuche konvertiert. Die oben behandelte Rohrreibungsgleichung ergibt dann:
$$f(\lambda) = \frac{1}{\sqrt\lambda} - 2\cdot\log\left(\text{Re}\cdot\sqrt\lambda\right) + 0,8 = 0$$
Ausgehend von einem geschätzten Startwert für die Nullstelle $\lambda_i$ wird die Steigung $f'(\lambda_i)$ der Funktion berechnet. Die Tangente im Punkt $(\lambda_i,f(\lambda_i))$ ist dann:
$$t(\lambda) = f(\lambda_i) + f'(\lambda_i)\cdot (\lambda - \lambda_i)$$
Der Schnittpunkt dieser Tangente mit der $\lambda$-Achse ergibt den neuen Näherungswert für die Nullstelle und damit die Iterationsvorschrift:
$$\lambda_{i+1} = \lambda_i - \frac{f(\lambda_i)}{f'(\lambda_i)}$$
Im Beispiel mit der Rohrreibungsgleichung ist die Ableitung:
$$f'(\lambda) = -\frac{1}{2\cdot\lambda^{3/2}} - \frac{1}{\lambda}$$
End of explanation
"""
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's CSS stylesheet and return it as rendered HTML."""
    # Use a context manager so the file handle is closed deterministically
    # (the original open(...).read() leaked the handle).
    with open('TFDStyle.css', 'r') as css_file:
        styles = css_file.read()
    return HTML(styles)

css_styling()
"""
Explanation: Verfahren zur Lösung von Gleichungssystemen
Weitere Verfahren, zur Lösung von ganzen Gleichungssystemen werden in Kapitel 4 vorgestellt. Diese Verfahren werden z.B. verwendet, um die riesigen Gleichungssysteme zu lösen, die bei der Diskretisierung von Transportgleichungen mithilfe der Finite-Differenzen- (FDM), Finite-Elemente- (FEM) oder Finite-Volumen-Methode (FVM) entstehen.
Prominente Vertreter sind das Gauß-Verfahren (Gauß-Seidel-Verfahren) und der Thomas-Algorithmus.
Hier geht's weiter oder hier zurück zur Übersicht.
Copyright (c) 2018, Florian Theobald und Matthias Stripf
Der folgende Python-Code darf ignoriert werden. Er dient nur dazu, die richtige Formatvorlage für die Jupyter-Notebooks zu laden.
End of explanation
"""
|
dsacademybr/PythonFundamentos | Cap02/Notebooks/DSA-Python-Cap02-01-Numeros.ipynb | gpl-3.0 | # Versão da Linguagem Python
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
"""
Explanation: <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 2</font>
Download: http://github.com/dsacademybr
End of explanation
"""
# Soma
4 + 4
# Subtração
4 - 3
# Multiplicação
3 * 3
# Divisão
3 / 2
# Potência
4 ** 2
# Módulo
10 % 3
"""
Explanation: Números e Operações Matemáticas
Pressione as teclas shift e enter para executar o código em uma célula ou pressione o botão Play no menu superior
End of explanation
"""
type(5)
type(5.0)
a = 'Eu sou uma string'
type(a)
"""
Explanation: Função Type
End of explanation
"""
3.1 + 6.4
4 + 4.0
4 + 4
# Resultado é um número float
4 / 2
# Resultado é um número inteiro
4 // 2
4 / 3.0
4 // 3.0
"""
Explanation: Operações com números float
End of explanation
"""
float(9)
int(6.0)
int(6.5)
"""
Explanation: Conversão
End of explanation
"""
hex(394)
hex(217)
bin(286)
bin(390)
"""
Explanation: Hexadecimal e Binário
End of explanation
"""
# Retorna o valor absoluto
abs(-8)
# Retorna o valor absoluto
abs(8)
# Retorna o valor com arredondamento
round(3.14151922,2)
# Potência
pow(4,2)
# Potência
pow(5,3)
"""
Explanation: Funções abs, round e pow
End of explanation
"""
|
scholer/cy-rest-python | basic/CytoscapeREST_Basic3.ipynb | mit | import requests
import json
import networkx as nx
from IPython.display import Image
from py2cytoscape import util as cy
import numpy as np
# Basic Setup
PORT_NUMBER = 1234
#IP = '192.168.1.1'
IP = 'localhost'
BASE = 'http://' + IP + ':' + str(PORT_NUMBER) + '/v1/'
# Header for posting data to the server as JSON
HEADERS = {'Content-Type': 'application/json'}
# Delete all networks in current session
requests.delete(BASE + 'session')
"""
Explanation: Basic Workflow 3: Visual Styles
by Keiichiro Ono
Introduction
Welcome to the part 3 of basic tutorial. In this example, you will learn how to create Visual Styles in Python.
Be sure to run
pip install -U requests
pip install -U networkx
pip install -U py2cytoscape
pip install -U numpy
before running the following cells.
End of explanation
"""
graphs = {}
NUMBER_OF_NODES = 100

# Scale-Free
g = nx.scale_free_graph(NUMBER_OF_NODES);

# Perform simple graph analysis

# Node statistics: attach centrality measures and degree to each node so
# they can drive Cytoscape visual mappings later.
bc = nx.betweenness_centrality(g)
degree = nx.degree(g)
cc = nx.closeness_centrality(g)
# NOTE(review): argument order (graph, name, values) matches networkx 1.x;
# networkx 2.x reversed it to (graph, values, name) -- confirm library version.
nx.set_node_attributes(g, 'betweenness', bc)
nx.set_node_attributes(g, 'closeness', cc)
nx.set_node_attributes(g, 'degree', degree)

# Network statistics, stored as graph-level attributes
g.graph["avg_shortest_path_len"] = nx.average_shortest_path_length(g)
g.graph["density"] = nx.density(g)
"""
Explanation: Generate Networks with NetworkX
Let's make a network using NetworkX for testing Visual Style.
End of explanation
"""
# Remove all networks
requests.delete(BASE + 'networks')
cyjs_network = cy.from_networkx(g)
res1 = requests.post(BASE + 'networks', data=json.dumps(cyjs_network), headers=HEADERS)
suid_res = res1.json()
suid = suid_res['networkSUID']
requests.get(BASE + 'apply/layouts/force-directed/' + str(suid))
Image(BASE+'networks/' + str(suid) + '/views/first.png')
"""
Explanation: Send all network models to Cytoscape
And post the network to Cytoscape.
End of explanation
"""
res = requests.get(BASE + 'styles/default')
#print(json.dumps(json.loads(res.content), indent=4))
"""
Explanation: Create Visual Styles Programmatically
In Cytoscape, Visual Style is a collection of default Visual Properties and mappings. And it is easy to create it programmatically since we can describe Visual Style as a simple Python object. Let's GET default style from current session.
End of explanation
"""
style_name = 'My Visual Style'
my_style = {
"title" : style_name,
"defaults" : [ {
"visualProperty" : "EDGE_WIDTH",
"value" : 2.0
}, {
"visualProperty" : "EDGE_STROKE_UNSELECTED_PAINT",
"value" : "#555555"
}, {
"visualProperty" : "NODE_FILL_COLOR",
"value" : "#00ddee"
},{
"visualProperty" : "NODE_BORDER_WIDTH",
"value" : 0
}, {
"visualProperty" : "NODE_SIZE",
"value" : 30
}],
"mappings" : [ {
"mappingType" : "discrete",
"mappingColumn" : "degree",
"mappingColumnType" : "Double",
"visualProperty" : "NODE_FILL_COLOR",
"map" : [ {
"key" : "1",
"value" : "#eeeeee"
}, {
"key" : "4",
"value" : "#00FF11"
} ]
}, {
"mappingType" : "passthrough",
"mappingColumn" : "name",
"mappingColumnType" : "String",
"visualProperty" : "NODE_LABEL"
}, {
"mappingType": "continuous",
"visualProperty": "NODE_SIZE",
"mappingColumnType": "Double",
"mappingColumn": "degree",
"points": [
{
"lesser": "1.0",
"equal": "20.0",
"value": 1,
"greater": "20.0"
},
{
"lesser": "100.0",
"equal": "100.0",
"value": 120,
"greater": "1.0"
}
] }
]
}
# Delete all style
requests.delete(BASE + "styles")
# Create new Visual Style
res = requests.post(BASE + "styles", data=json.dumps(my_style), headers=HEADERS)
new_style_name = res.json()['title']
# Apply it to current netwrok
requests.get(BASE + 'apply/styles/' + new_style_name + '/' + str(suid))
# Display it here!
Image(BASE+'networks/' + str(suid) + '/views/first.png')
"""
Explanation: As you can see, it is a simple collection of default values and mappings and in Python, it is easy to create such object. It looks a bit lengthy, but it's straightforward:
End of explanation
"""
|
5agado/data-science-learning | deep learning/StyleGAN/StyleGAN - Latents Exploration.ipynb | apache-2.0 | from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
from datetime import datetime
from tqdm import tqdm
import imageio
from ipywidgets import interact, interact_manual
from IPython.display import display
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
# ffmpeg installation location, for creating videos
plt.rcParams['animation.ffmpeg_path'] = str(Path.home() / "Documents/dev_tools/ffmpeg-20190623-ffa64a4-win64-static/bin/ffmpeg.exe")
%load_ext autoreload
%autoreload 2
# StyleGAN Utils
from stylegan_utils import load_network, gen_image_fun, synth_image_fun, create_video
from stylegan_utils import load_latents, get_ipywidget_elements, load_directions
# StyleGAN2 Repo
sys.path.append(os.path.join(os.pardir, 'stylegan2encoder'))
import projector
import training.dataset
# Data Science Utils
sys.path.append(os.path.join(os.pardir, 'data-science-learning'))
from ds_utils import generative_utils
res_dir = Path.home() / 'Documents/generated_data/stylegan'
"""
Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Load-Network" data-toc-modified-id="Load-Network-1"><span class="toc-item-num">1 </span>Load Network</a></span></li><li><span><a href="#Style-Mixing" data-toc-modified-id="Style-Mixing-2"><span class="toc-item-num">2 </span>Style Mixing</a></span></li><li><span><a href="#Latents-Transition/Morphing" data-toc-modified-id="Latents-Transition/Morphing-3"><span class="toc-item-num">3 </span>Latents Transition/Morphing</a></span></li><li><span><a href="#Explore-PSI" data-toc-modified-id="Explore-PSI-4"><span class="toc-item-num">4 </span>Explore PSI</a></span></li><li><span><a href="#Explore-Latents-Indexes" data-toc-modified-id="Explore-Latents-Indexes-5"><span class="toc-item-num">5 </span>Explore Latents Indexes</a></span></li></ul></div>
Playground for experiments with StyleGANv2 latents.
Includes interactive style mixing, latents interpolation or morphing and latents tweaking.
End of explanation
"""
MODELS_DIR = Path("C:/Users/User/Documents/models/stylegan2")
MODEL_NAME = 'drawing2_1024'
SNAPSHOT_NAME = 'network-snapshot-002048'
Gs, Gs_kwargs, noise_vars = load_network(str(MODELS_DIR / MODEL_NAME / SNAPSHOT_NAME) + '.pkl')
Z_SIZE = Gs.input_shape[1:][0]
IMG_SIZE = Gs.output_shape[2:]
IMG_SIZE
"""
Explanation: Load Network
End of explanation
"""
# used when saving the currently displayed image
current_displayed_latents = None
current_displayed_img = None
# save directory
save_dir = res_dir / 'projection' / MODEL_NAME / SNAPSHOT_NAME / "picked"
save_dir.mkdir(parents=True, exist_ok=True)
def mix_latents(latents_1, latents_2, layers_idxs, alpha=1.):
    """Blend latents_1 into latents_2 on the selected layer indexes.

    alpha=1 copies latents_1 wholesale at those layers, alpha=0 leaves
    latents_2 untouched. Inputs are resolved via load_latents (paths or
    arrays); a new array is returned, the inputs are not modified.
    """
    src = load_latents(latents_1)
    dst = load_latents(latents_2)
    assert src.shape == dst.shape

    # Linear crossover restricted to the chosen layers, from src into dst.
    mixed = dst.copy()
    mixed[layers_idxs] = alpha * src[layers_idxs] + (1. - alpha) * dst[layers_idxs]
    return mixed
# util to get names of various latents
target_dir_name = ''
data_dir = res_dir / 'projection' / MODEL_NAME / SNAPSHOT_NAME / target_dir_name
entries = [p.name for p in data_dir.glob("*") if p.is_dir()]
entries.remove('tfrecords')
entries2 = np.arange(30)
# load directions
directions_dir = MODELS_DIR / MODEL_NAME / 'ganspace/directions_01'
directions = load_directions(directions_dir, is_ganspace=True)
print(directions.keys())
%matplotlib notebook
# Setup plot image
fig, ax = plt.subplots(figsize=(9, 9))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0, wspace=0)
plt.axis('off')
im = ax.imshow(gen_image_fun(Gs, np.random.randn(1, Z_SIZE), Gs_kwargs, noise_vars))
def on_button_clicked(b):
    """Persist the currently displayed image and its latents to save_dir.

    Both files share a timestamped basename so they can be matched later.
    """
    global current_displayed_latents, current_displayed_img
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    plt.imsave(save_dir / (stamp + '.png'), current_displayed_img)
    np.save(save_dir / (stamp + '.npy'), current_displayed_latents)
def i_style_mixing(entry1, entry2, layers_idxs, alpha, directions_coeffs, directions_layers):
    """Interactive style-mixing callback for the ipywidgets controls.

    Mixes the latents of two entries over a range of layers, optionally adds
    weighted latent directions, synthesizes the result and updates the
    displayed image. Also stores the latents/image in module globals so the
    save button can export them.

    :param entry1: name of the projected entry (loaded from disk)
    :param entry2: index of the randomly generated entry (loaded from disk)
    :param layers_idxs: (start, stop) pair expanded to a range of layer indexes
    :param alpha: crossover strength passed to mix_latents
    :param directions_coeffs: {direction_name: coefficient} weights
    :param directions_layers: {direction_name: layer indexes} to apply each direction on
    """
    latents_1 = res_dir / 'projection' / MODEL_NAME / SNAPSHOT_NAME / target_dir_name / f'{entry1}' / 'image_latents1000.npy'
    latents_2 = res_dir / 'projection' / MODEL_NAME / SNAPSHOT_NAME / 'rand_gen/psi05' / f'{entry2}.npy'
    layers_idxs = np.arange(layers_idxs[0], layers_idxs[1])

    # compute mixed latents
    mixed_latents = mix_latents(latents_2, latents_1, layers_idxs=layers_idxs, alpha=alpha)

    # add directions (only those with a non-zero coefficient)
    for direction_name, coeff in directions_coeffs.items():
        if coeff != 0.:
            d_layers_idxs = directions_layers[direction_name]
            mixed_latents[d_layers_idxs] = mixed_latents[d_layers_idxs] + (coeff * directions[direction_name])

    # generate image
    gen_image = synth_image_fun(Gs, mixed_latents[np.newaxis, :, :], Gs_kwargs, randomize_noise=True)

    # store in case we want to export results from widget
    # Fix: previously this assigned the function object `mix_latents`
    # instead of the computed array, so the save button exported garbage.
    global current_displayed_latents, current_displayed_img
    current_displayed_latents = mixed_latents
    current_displayed_img = gen_image

    im.set_data(gen_image)
# ipywdigets setup
display_element = get_ipywidget_elements(i_style_mixing, on_button_clicked, entries, entries2, list(directions.keys()))
display(display_element)
"""
Explanation: Style Mixing
End of explanation
"""
#PLOT_IMG_SHAPE = (512, 512, 3)
PLOT_IMG_SHAPE = (IMG_SIZE[0], IMG_SIZE[1], 3)
render_dir = res_dir / MODEL_NAME / SNAPSHOT_NAME / "explore_latent"
nb_samples = 2
nb_transition_frames = 450
nb_frames = min(450, (nb_samples-1)*nb_transition_frames)
psi=1
# run animation
for i in range(0, 2):
# setup the passed latents
z_s = np.random.randn(nb_samples, Z_SIZE)
#latents = Gs.components.mapping.run(z_s, None)
passed_latents=z_s
animate_latent_transition(latent_vectors=passed_latents,
#gen_image_fun=synth_image_fun,
gen_image_fun=lambda latents : gen_image_fun(Gs, latents, Gs_kwargs, truncation_psi=psi),
gen_latent_fun=lambda z_s, i: gen_latent_linear(passed_latents, i, nb_transition_frames),
img_size=PLOT_IMG_SHAPE,
nb_frames=nb_frames,
render_dir=render_dir / "transitions")
"""
Explanation: Latents Transition/Morphing
End of explanation
"""
#PLOT_IMG_SHAPE = (512, 512, 3)
PLOT_IMG_SHAPE = (IMG_SIZE[0], IMG_SIZE[1], 3)
render_dir = res_dir / MODEL_NAME / SNAPSHOT_NAME / 'explore_latent'
nb_samples = 20
nb_transition_frames = 24
nb_frames = min(450, (nb_samples-1)*nb_transition_frames)
# setup the passed latents
z_s = np.random.randn(nb_samples, Z_SIZE)
#latents = Gs.components.mapping.run(z_s, None)
passed_latents = z_s
# run animation
#[2., 1.5, 1., 0.7, 0.5, 0., -0.5, -0.7, -1., -1.5, -2.]
for psi in np.linspace(-0.5, 1.5, 9):
animate_latent_transition(latent_vectors=passed_latents,
#gen_image_fun=synth_image_fun,
gen_image_fun=lambda latents : gen_image_fun(Gs, latents, Gs_kwargs, truncation_psi=psi),
gen_latent_fun=lambda z_s, i: gen_latent_linear(passed_latents, i, nb_transition_frames),
img_size=PLOT_IMG_SHAPE,
nb_frames=nb_frames,
render_dir=render_dir / 'psi',
file_prefix='psi{}'.format(str(psi).replace('.', '_')[:5]))
"""
Explanation: Explore PSI
End of explanation
"""
#PLOT_IMG_SHAPE = (512, 512, 3)
PLOT_IMG_SHAPE = (IMG_SIZE[0], IMG_SIZE[1], 3)
render_dir = res_dir / MODEL_NAME / SNAPSHOT_NAME / "explore_latent"

nb_transition_frames = 48

# random starting z vector, mapped into the disentangled W space
#rand_idx = np.random.randint(len(X_train))
z_start = np.random.randn(1, Z_SIZE)
# Fix: this assignment was commented out while `dlatents` is used below,
# which raised a NameError when running the cell.
dlatents = Gs.components.mapping.run(z_start, None, dlatent_broadcast=None)
#vals = np.linspace(-2., 2., nb_transition_frames)
nb_styles = dlatents.shape[0]
# per-frame random latents with a linear drift from -1 to 1
stylelatent_vals = np.random.randn(nb_transition_frames, Z_SIZE) + np.linspace(-1., 1., nb_transition_frames)[:, np.newaxis]
for z_idx in range(nb_styles):
    animate_latent_transition(latent_vectors=dlatents[0],
                             gen_image_fun=synth_image_fun,
                             gen_latent_fun=lambda z_s, i: gen_latent_style_idx(dlatents[0], i, z_idx, stylelatent_vals),
                             img_size=PLOT_IMG_SHAPE,
                             nb_frames=nb_transition_frames,
                             render_dir=render_dir / 'latent_indexes')
"""
Explanation: Explore Latents Indexes
End of explanation
"""
|
chongyangma/python-machine-learning-book | code/ch05/ch05.ipynb | mit | %load_ext watermark
%watermark -a 'Sebastian Raschka' -u -d -p numpy,scipy,matplotlib,sklearn
"""
Explanation: Copyright (c) 2015-2017 Sebastian Raschka
https://github.com/rasbt/python-machine-learning-book
MIT License
Python Machine Learning - Code Examples
Chapter 5 - Compressing Data via Dimensionality Reduction
Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
End of explanation
"""
from IPython.display import Image
%matplotlib inline
# Added version check for recent scikit-learn 0.18 checks
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
"""
Explanation: The use of watermark is optional. You can install this IPython extension via "pip install watermark". For more information, please see: https://github.com/rasbt/watermark.
<br>
<br>
Overview
Unsupervised dimensionality reduction via principal component analysis 128
Total and explained variance
Feature transformation
Principal component analysis in scikit-learn
Supervised data compression via linear discriminant analysis
Computing the scatter matrices
Selecting linear discriminants for the new feature subspace
Projecting samples onto the new feature space
LDA via scikit-learn
Using kernel principal component analysis for nonlinear mappings
Kernel functions and the kernel trick
Implementing a kernel principal component analysis in Python
Example 1 – separating half-moon shapes
Example 2 – separating concentric circles
Projecting new data points
Kernel principal component analysis in scikit-learn
Summary
<br>
<br>
End of explanation
"""
Image(filename='./images/05_01.png', width=400)
import pandas as pd
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/wine/wine.data',
header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline']
df_wine.head()
"""
Explanation: Unsupervised dimensionality reduction via principal component analysis
End of explanation
"""
df_wine = pd.read_csv('https://raw.githubusercontent.com/rasbt/python-machine-learning-book/master/code/datasets/wine/wine.data', header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline']
df_wine.head()
"""
Explanation: <hr>
Note:
If the link to the Wine dataset provided above does not work for you, you can find a local copy in this repository at ./../datasets/wine/wine.data.
Or you could fetch it via
End of explanation
"""
if Version(sklearn_version) < '0.18':
from sklearn.cross_validation import train_test_split
else:
from sklearn.model_selection import train_test_split
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.3, random_state=0)
"""
Explanation: <hr>
Splitting the data into 70% training and 30% test subsets.
End of explanation
"""
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
"""
Explanation: Standardizing the data.
End of explanation
"""
import numpy as np
cov_mat = np.cov(X_train_std.T)
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
print('\nEigenvalues \n%s' % eigen_vals)
"""
Explanation: Note
Accidentally, I wrote X_test_std = sc.fit_transform(X_test) instead of X_test_std = sc.transform(X_test). In this case, it wouldn't make a big difference since the mean and standard deviation of the test set should be (quite) similar to the training set. However, as remember from Chapter 3, the correct way is to re-use parameters from the training set if we are doing any kind of transformation -- the test set should basically stand for "new, unseen" data.
My initial typo reflects a common mistake is that some people are not re-using these parameters from the model training/building and standardize the new data "from scratch." Here's simple example to explain why this is a problem.
Let's assume we have a simple training set consisting of 3 samples with 1 feature (let's call this feature "length"):
train_1: 10 cm -> class_2
train_2: 20 cm -> class_2
train_3: 30 cm -> class_1
mean: 20, std.: 8.2
After standardization, the transformed feature values are
train_std_1: -1.21 -> class_2
train_std_2: 0 -> class_2
train_std_3: 1.21 -> class_1
Next, let's assume our model has learned to classify samples with a standardized length value < 0.6 as class_2 (class_1 otherwise). So far so good. Now, let's say we have 3 unlabeled data points that we want to classify:
new_4: 5 cm -> class ?
new_5: 6 cm -> class ?
new_6: 7 cm -> class ?
If we look at the "unstandardized" length values in our training dataset, it is intuitive to say that all of these samples are likely belonging to class_2. However, if we standardize these by re-computing the standard deviation and mean, you would get similar values as before in the training set and your classifier would (probably incorrectly) classify samples 4 and 5 as class 2.
new_std_4: -1.21 -> class 2
new_std_5: 0 -> class 2
new_std_6: 1.21 -> class 1
However, if we use the parameters from your "training set standardization," we'd get the values:
sample5: -18.37 -> class 2
sample6: -17.15 -> class 2
sample7: -15.92 -> class 2
The values 5 cm, 6 cm, and 7 cm are much lower than anything we have seen in the training set previously. Thus, it only makes sense that the standardized features of the "new samples" are much lower than every standardized feature in the training set.
Eigendecomposition of the covariance matrix.
End of explanation
"""
tot = sum(eigen_vals)
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]
cum_var_exp = np.cumsum(var_exp)
import matplotlib.pyplot as plt
plt.bar(range(1, 14), var_exp, alpha=0.5, align='center',
label='individual explained variance')
plt.step(range(1, 14), cum_var_exp, where='mid',
label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('./figures/pca1.png', dpi=300)
plt.show()
"""
Explanation: Note:
Above, I used the numpy.linalg.eig function to decompose the symmetric covariance matrix into its eigenvalues and eigenvectors.
<pre>>>> eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)</pre>
This is not really a "mistake," but probably suboptimal. It would be better to use numpy.linalg.eigh in such cases, which has been designed for Hermitian matrices. The latter always returns real eigenvalues, whereas the numerically less stable np.linalg.eig can decompose nonsymmetric square matrices, so you may find that it returns complex eigenvalues in certain cases. (S.R.)
<br>
<br>
Total and explained variance
End of explanation
"""
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs.sort(key=lambda k: k[0], reverse=True)
# Note: I added the `key=lambda k: k[0]` in the sort call above
# just like I used it further below in the LDA section.
# This is to avoid problems if there are ties in the eigenvalue
# arrays (i.e., the sorting algorithm will only regard the
# first element of the tuples, now).
w = np.hstack((eigen_pairs[0][1][:, np.newaxis],
eigen_pairs[1][1][:, np.newaxis]))
print('Matrix W:\n', w)
"""
Explanation: <br>
<br>
Feature transformation
End of explanation
"""
X_train_pca = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_pca[y_train == l, 0],
X_train_pca[y_train == l, 1],
c=c, label=l, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('./figures/pca2.png', dpi=300)
plt.show()
X_train_std[0].dot(w)
"""
Explanation: Note
Depending on which version of NumPy and LAPACK you are using, you may obtain the the Matrix W with its signs flipped. E.g., the matrix shown in the book was printed as:
[[ 0.14669811 0.50417079]
[-0.24224554 0.24216889]
[-0.02993442 0.28698484]
[-0.25519002 -0.06468718]
[ 0.12079772 0.22995385]
[ 0.38934455 0.09363991]
[ 0.42326486 0.01088622]
[-0.30634956 0.01870216]
[ 0.30572219 0.03040352]
[-0.09869191 0.54527081]
Please note that this is not an issue: If $v$ is an eigenvector of a matrix $\Sigma$, we have
$$\Sigma v = \lambda v,$$
where $\lambda$ is our eigenvalue,
then $-v$ is also an eigenvector that has the same eigenvalue, since
$$\Sigma(-v) = -\Sigma v = -\lambda v = \lambda(-v).$$
End of explanation
"""
from sklearn.decomposition import PCA
pca = PCA()
X_train_pca = pca.fit_transform(X_train_std)
pca.explained_variance_ratio_
plt.bar(range(1, 14), pca.explained_variance_ratio_, alpha=0.5, align='center')
plt.step(range(1, 14), np.cumsum(pca.explained_variance_ratio_), where='mid')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.show()
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1])
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.show()
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
    """Plot the 2-D decision surface of a fitted classifier with the samples.

    X must be a 2-feature array; the classifier is evaluated on a dense
    grid spanning the data (with a 1-unit margin) and the class regions
    are drawn with contourf, then the labelled points are overlaid.
    """
    # per-class marker and color cycles
    marker_cycle = ('s', 'x', 'o', '^', 'v')
    color_cycle = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    classes = np.unique(y)
    cmap = ListedColormap(color_cycle[:len(classes)])

    # evaluate the classifier over a grid covering the data range
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    grid_points = np.array([xx1.ravel(), xx2.ravel()]).T
    Z = classifier.predict(grid_points).reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # overlay the labelled samples, one marker/color per class
    for idx, cl in enumerate(classes):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    alpha=0.6,
                    c=cmap(idx),
                    edgecolor='black',
                    marker=marker_cycle[idx],
                    label=cl)
"""
Explanation: <br>
<br>
Principal component analysis in scikit-learn
End of explanation
"""
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr = lr.fit(X_train_pca, y_train)
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('./figures/pca3.png', dpi=300)
plt.show()
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('./figures/pca4.png', dpi=300)
plt.show()
pca = PCA(n_components=None)
X_train_pca = pca.fit_transform(X_train_std)
pca.explained_variance_ratio_
"""
Explanation: Training logistic regression classifier using the first 2 principal components.
End of explanation
"""
Image(filename='./images/05_06.png', width=400)
"""
Explanation: <br>
<br>
Supervised data compression via linear discriminant analysis
End of explanation
"""
np.set_printoptions(precision=4)
mean_vecs = []
for label in range(1, 4):
mean_vecs.append(np.mean(X_train_std[y_train == label], axis=0))
print('MV %s: %s\n' % (label, mean_vecs[label - 1]))
"""
Explanation: <br>
<br>
Computing the scatter matrices
Calculate the mean vectors for each class:
End of explanation
"""
d = 13 # number of features
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.zeros((d, d)) # scatter matrix for each class
for row in X_train_std[y_train == label]:
row, mv = row.reshape(d, 1), mv.reshape(d, 1) # make column vectors
class_scatter += (row - mv).dot((row - mv).T)
S_W += class_scatter # sum class scatter matrices
print('Within-class scatter matrix: %sx%s' % (S_W.shape[0], S_W.shape[1]))
"""
Explanation: Compute the within-class scatter matrix:
End of explanation
"""
print('Class label distribution: %s'
% np.bincount(y_train)[1:])
d = 13 # number of features
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.cov(X_train_std[y_train == label].T)
S_W += class_scatter
print('Scaled within-class scatter matrix: %sx%s' % (S_W.shape[0],
S_W.shape[1]))
"""
Explanation: Better: covariance matrix since classes are not equally distributed:
End of explanation
"""
mean_overall = np.mean(X_train_std, axis=0)
d = 13 # number of features
S_B = np.zeros((d, d))
for i, mean_vec in enumerate(mean_vecs):
n = X_train[y_train == i + 1, :].shape[0]
mean_vec = mean_vec.reshape(d, 1) # make column vector
mean_overall = mean_overall.reshape(d, 1) # make column vector
S_B += n * (mean_vec - mean_overall).dot((mean_vec - mean_overall).T)
print('Between-class scatter matrix: %sx%s' % (S_B.shape[0], S_B.shape[1]))
"""
Explanation: Compute the between-class scatter matrix:
End of explanation
"""
eigen_vals, eigen_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
"""
Explanation: <br>
<br>
Selecting linear discriminants for the new feature subspace
Solve the generalized eigenvalue problem for the matrix $S_W^{-1}S_B$:
End of explanation
"""
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs = sorted(eigen_pairs, key=lambda k: k[0], reverse=True)
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in decreasing order:\n')
for eigen_val in eigen_pairs:
print(eigen_val[0])
tot = sum(eigen_vals.real)
discr = [(i / tot) for i in sorted(eigen_vals.real, reverse=True)]
cum_discr = np.cumsum(discr)
plt.bar(range(1, 14), discr, alpha=0.5, align='center',
label='individual "discriminability"')
plt.step(range(1, 14), cum_discr, where='mid',
label='cumulative "discriminability"')
plt.ylabel('"discriminability" ratio')
plt.xlabel('Linear Discriminants')
plt.ylim([-0.1, 1.1])
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('./figures/lda1.png', dpi=300)
plt.show()
w = np.hstack((eigen_pairs[0][1][:, np.newaxis].real,
eigen_pairs[1][1][:, np.newaxis].real))
print('Matrix W:\n', w)
"""
Explanation: Note:
Above, I used the numpy.linalg.eig function to decompose the symmetric covariance matrix into its eigenvalues and eigenvectors.
<pre>>>> eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)</pre>
This is not really a "mistake," but probably suboptimal. It would be better to use numpy.linalg.eigh in such cases, which has been designed for Hermitian matrices. The latter always returns real eigenvalues, whereas the numerically less stable np.linalg.eig can decompose nonsymmetric square matrices, so you may find that it returns complex eigenvalues in certain cases. (S.R.)
Sort eigenvectors in decreasing order of the eigenvalues:
End of explanation
"""
X_train_lda = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_lda[y_train == l, 0] * (-1),
X_train_lda[y_train == l, 1] * (-1),
c=c, label=l, marker=m)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower right')
plt.tight_layout()
# plt.savefig('./figures/lda2.png', dpi=300)
plt.show()
"""
Explanation: <br>
<br>
Projecting samples onto the new feature space
End of explanation
"""
if Version(sklearn_version) < '0.18':
from sklearn.lda import LDA
else:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train_std, y_train)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr = lr.fit(X_train_lda, y_train)
plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('./images/lda3.png', dpi=300)
plt.show()
X_test_lda = lda.transform(X_test_std)
plot_decision_regions(X_test_lda, y_test, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('./images/lda4.png', dpi=300)
plt.show()
"""
Explanation: <br>
<br>
LDA via scikit-learn
End of explanation
"""
Image(filename='./images/05_11.png', width=500)
"""
Explanation: <br>
<br>
Using kernel principal component analysis for nonlinear mappings
End of explanation
"""
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
import numpy as np
def rbf_kernel_pca(X, gamma, n_components):
    """
    RBF kernel PCA implementation.

    Parameters
    ------------
    X: {NumPy ndarray}, shape = [n_samples, n_features]

    gamma: float
      Tuning parameter of the RBF kernel

    n_components: int
      Number of principal components to return

    Returns
    ------------
    X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
       Projected dataset

    """
    # Calculate pairwise squared Euclidean distances
    # in the MxN dimensional dataset.
    sq_dists = pdist(X, 'sqeuclidean')

    # Convert pairwise distances into a square matrix.
    mat_sq_dists = squareform(sq_dists)

    # Compute the symmetric kernel matrix.
    # Fix: use np.exp; scipy.exp was deprecated and removed in SciPy >= 1.6.
    K = np.exp(-gamma * mat_sq_dists)

    # Center the kernel matrix.
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)

    # Obtaining eigenpairs from the centered kernel matrix
    # scipy.linalg.eigh returns them in ascending order.
    eigvals, eigvecs = eigh(K)

    # Collect the top k eigenvectors (projected samples).
    # Fix: np.column_stack requires a sequence, not a generator,
    # since NumPy 1.16.
    X_pc = np.column_stack([eigvecs[:, -i]
                            for i in range(1, n_components + 1)])

    return X_pc
"""
Explanation: <br>
<br>
Implementing a kernel principal component analysis in Python
End of explanation
"""
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5)
plt.tight_layout()
# plt.savefig('./figures/half_moon_1.png', dpi=300)
plt.show()
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('./figures/half_moon_2.png', dpi=300)
plt.show()
from matplotlib.ticker import FormatStrFormatter
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1,ncols=2, figsize=(7,3))
ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y==0, 0], np.zeros((50,1))+0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y==1, 0], np.zeros((50,1))-0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.tight_layout()
# plt.savefig('./figures/half_moon_3.png', dpi=300)
plt.show()
"""
Explanation: <br>
Example 1: Separating half-moon shapes
End of explanation
"""
from sklearn.datasets import make_circles
X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5)
plt.tight_layout()
# plt.savefig('./figures/circles_1.png', dpi=300)
plt.show()
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('./figures/circles_2.png', dpi=300)
plt.show()
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('./figures/circles_3.png', dpi=300)
plt.show()
"""
Explanation: <br>
Example 2: Separating concentric circles
End of explanation
"""
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
import numpy as np
def rbf_kernel_pca(X, gamma, n_components):
    """
    RBF kernel PCA implementation.

    Parameters
    ------------
    X: {NumPy ndarray}, shape = [n_samples, n_features]

    gamma: float
      Tuning parameter of the RBF kernel

    n_components: int
      Number of principal components to return

    Returns
    ------------
    alphas: {NumPy ndarray}, shape = [n_samples, k_features]
       Projected dataset

    lambdas: list
       Eigenvalues (largest first), needed to project new points

    """
    # Calculate pairwise squared Euclidean distances
    # in the MxN dimensional dataset.
    sq_dists = pdist(X, 'sqeuclidean')

    # Convert pairwise distances into a square matrix.
    mat_sq_dists = squareform(sq_dists)

    # Compute the symmetric kernel matrix.
    # Fix: use np.exp; scipy.exp was deprecated and removed in SciPy >= 1.6.
    K = np.exp(-gamma * mat_sq_dists)

    # Center the kernel matrix.
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)

    # Obtaining eigenpairs from the centered kernel matrix
    # scipy.linalg.eigh returns them in ascending order.
    eigvals, eigvecs = eigh(K)

    # Collect the top k eigenvectors (projected samples).
    # Fix: np.column_stack requires a sequence, not a generator,
    # since NumPy 1.16.
    alphas = np.column_stack([eigvecs[:, -i]
                              for i in range(1, n_components + 1)])

    # Collect the corresponding eigenvalues, largest first.
    lambdas = [eigvals[-i] for i in range(1, n_components + 1)]

    return alphas, lambdas
X, y = make_moons(n_samples=100, random_state=123)
alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=1)
x_new = X[-1]
x_new
x_proj = alphas[-1] # original projection
x_proj
def project_x(x_new, X, gamma, alphas, lambdas):
    """Project a new point onto the kernel principal components.

    Computes RBF kernel values between x_new and every training row,
    then weights the eigenvectors normalized by their eigenvalues.
    """
    # squared Euclidean distance from x_new to each training sample
    sq_dist = ((X - x_new) ** 2).sum(axis=1)
    k = np.exp(-gamma * sq_dist)
    return k.dot(alphas / lambdas)
# projection of the "new" datapoint
x_reproj = project_x(x_new, X, gamma=15, alphas=alphas, lambdas=lambdas)
x_reproj
plt.scatter(alphas[y == 0, 0], np.zeros((50)),
color='red', marker='^', alpha=0.5)
plt.scatter(alphas[y == 1, 0], np.zeros((50)),
color='blue', marker='o', alpha=0.5)
plt.scatter(x_proj, 0, color='black',
label='original projection of point X[25]', marker='^', s=100)
plt.scatter(x_reproj, 0, color='green',
label='remapped point X[25]', marker='x', s=500)
plt.legend(scatterpoints=1)
plt.tight_layout()
# plt.savefig('./figures/reproject.png', dpi=300)
plt.show()
X, y = make_moons(n_samples=100, random_state=123)
alphas, lambdas = rbf_kernel_pca(X[:-1, :], gamma=15, n_components=1)
def project_x(x_new, X, gamma, alphas, lambdas):
    """Map a new data point into the kernel PCA subspace spanned by ``alphas``.

    Uses the RBF kernel with width parameter ``gamma``; eigenvectors are
    normalised by their eigenvalues before the projection.
    """
    # Row-wise squared Euclidean distances to every training sample.
    deltas = np.array([((x_new - row) ** 2).sum() for row in X])
    # Kernel similarities, then the projection onto the normalised eigenvectors.
    return np.exp(-gamma * deltas).dot(alphas / lambdas)
# projection of the "new" datapoint
x_new = X[-1]
x_reproj = project_x(x_new, X[:-1], gamma=15, alphas=alphas, lambdas=lambdas)
plt.scatter(alphas[y[:-1] == 0, 0], np.zeros((50)),
color='red', marker='^', alpha=0.5)
plt.scatter(alphas[y[:-1] == 1, 0], np.zeros((49)),
color='blue', marker='o', alpha=0.5)
plt.scatter(x_reproj, 0, color='green',
label='new point [ 100.0, 100.0]', marker='x', s=500)
plt.legend(scatterpoints=1)
plt.scatter(alphas[y[:-1] == 0, 0], np.zeros((50)),
color='red', marker='^', alpha=0.5)
plt.scatter(alphas[y[:-1] == 1, 0], np.zeros((49)),
color='blue', marker='o', alpha=0.5)
plt.scatter(x_proj, 0, color='black',
label='some point [1.8713, 0.0093]', marker='^', s=100)
plt.scatter(x_reproj, 0, color='green',
label='new point [ 100.0, 100.0]', marker='x', s=500)
plt.legend(scatterpoints=1)
plt.tight_layout()
# plt.savefig('./figures/reproject.png', dpi=300)
plt.show()
"""
Explanation: <br>
<br>
Projecting new data points
End of explanation
"""
from sklearn.decomposition import KernelPCA
X, y = make_moons(n_samples=100, random_state=123)
scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
X_skernpca = scikit_kpca.fit_transform(X)
plt.scatter(X_skernpca[y == 0, 0], X_skernpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
plt.scatter(X_skernpca[y == 1, 0], X_skernpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.tight_layout()
# plt.savefig('./figures/scikit_kpca.png', dpi=300)
plt.show()
"""
Explanation: <br>
<br>
Kernel principal component analysis in scikit-learn
End of explanation
"""
|
google/starthinker | colabs/dataset.ipynb | apache-2.0 | !pip install git+https://github.com/google/starthinker
"""
Explanation: BigQuery Dataset
Create and permission a dataset in BigQuery.
License
Copyright 2020 Google LLC,
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Disclaimer
This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.
This code generated (see starthinker/scripts for possible source):
- Command: "python starthinker_ui/manage.py colab"
- Command: "python starthinker/tools/colab.py [JSON RECIPE]"
1. Install Dependencies
First install the libraries needed to execute recipes, this only needs to be done once, then click play.
End of explanation
"""
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
"""
Explanation: 2. Set Configuration
This code is required to initialize the project. Fill in required fields and press play.
If the recipe uses a Google Cloud Project:
Set the configuration project value to the project identifier from these instructions.
If the recipe has auth set to user:
If you have user credentials:
Set the configuration user value to your user credentials JSON.
If you DO NOT have user credentials:
Set the configuration client value to downloaded client credentials.
If the recipe has auth set to service:
Set the configuration service value to downloaded service credentials.
End of explanation
"""
FIELDS = {
'auth_write':'service', # Credentials used for writing data.
'dataset_dataset':'', # Name of Google BigQuery dataset to create.
'dataset_emails':[], # Comma separated emails.
'dataset_groups':[], # Comma separated groups.
}
print("Parameters Set To: %s" % FIELDS)
"""
Explanation: 3. Enter BigQuery Dataset Recipe Parameters
Specify the name of the dataset.
If the dataset already exists, it is left unchanged.
Add emails and / or groups to add read permission.
CAUTION: Removing permissions in StarThinker has no effect.
CAUTION: To remove permissions you have to edit the dataset.
Modify the values below for your use case, can be done multiple times, then click play.
End of explanation
"""
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
TASKS = [
{
'dataset':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'dataset':{'field':{'name':'dataset_dataset','kind':'string','order':1,'default':'','description':'Name of Google BigQuery dataset to create.'}},
'emails':{'field':{'name':'dataset_emails','kind':'string_list','order':2,'default':[],'description':'Comma separated emails.'}},
'groups':{'field':{'name':'dataset_groups','kind':'string_list','order':3,'default':[],'description':'Comma separated groups.'}}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
"""
Explanation: 4. Execute BigQuery Dataset
This does NOT need to be modified unless you are changing the recipe, click play.
End of explanation
"""
|
dotsdl/msmbuilder | examples/bayesian-msm.ipynb | lgpl-2.1 | %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from mdtraj.utils import timing
from msmbuilder.example_datasets import load_doublewell
from msmbuilder.cluster import NDGrid
from msmbuilder.msm import BayesianMarkovStateModel, MarkovStateModel
"""
Explanation: BayesianMarkovStateModel
This example demonstrates the class BayesianMarkovStateModel, which uses Metropolis Markov chain Monte Carlo (MCMC) to sample
over the posterior distribution of transition matrices, given the observed transitions in your dataset. This can be useful
for evaluating the uncertainty due to sampling in your dataset.
End of explanation
"""
trjs = load_doublewell(random_state=0)['trajectories']
plt.hist(np.concatenate(trjs), bins=50, log=True)
plt.ylabel('Frequency')
plt.show()
"""
Explanation: Load some double-well data
End of explanation
"""
clusterer = NDGrid(n_bins_per_feature=10)
mle_msm = MarkovStateModel(lag_time=100)
b_msm = BayesianMarkovStateModel(lag_time=100, n_samples=10000, n_steps=1000)
states = clusterer.fit_transform(trjs)
with timing('running mcmc'):
b_msm.fit(states)
mle_msm.fit(states)
plt.subplot(2, 1, 1)
plt.plot(b_msm.all_transmats_[:, 0, 0])
plt.axhline(mle_msm.transmat_[0, 0], c='k')
plt.ylabel('t_00')
plt.subplot(2, 1, 2)
plt.ylabel('t_23')
plt.xlabel('MCMC Iteration')
plt.plot(b_msm.all_transmats_[:, 2, 3])
plt.axhline(mle_msm.transmat_[2, 3], c='k')
plt.show()
plt.plot(b_msm.all_timescales_[:, 0], label='MCMC')
plt.axhline(mle_msm.timescales_[0], c='k', label='MLE')
plt.legend(loc='best')
plt.ylabel('Longest timescale')
plt.xlabel('MCMC iteration')
plt.show()
"""
Explanation: We'll discretize the space using 10 states
And then build one MSM using the MLE transition matrix estimator, and one with the Bayesian estimator
End of explanation
"""
clusterer = NDGrid(n_bins_per_feature=50)
mle_msm = MarkovStateModel(lag_time=100)
b_msm = BayesianMarkovStateModel(lag_time=100, n_samples=1000, n_steps=100000)
states = clusterer.fit_transform(trjs)
with timing('running mcmc (50 states)'):
b_msm.fit(states)
mle_msm.fit(states)
plt.plot(b_msm.all_timescales_[:, 0], label='MCMC')
plt.axhline(mle_msm.timescales_[0], c='k', label='MLE')
plt.legend(loc='best')
plt.ylabel('Longest timescale')
plt.xlabel('MCMC iteration')
plt.plot(b_msm.all_transmats_[:, 0, 0], label='MCMC')
plt.axhline(mle_msm.transmat_[0, 0], c='k', label='MLE')
plt.legend(loc='best')
plt.ylabel('t_00')
plt.xlabel('MCMC iteration')
"""
Explanation: Now lets try using 50 states
The MCMC sampling is a lot harder to converge
End of explanation
"""
|
pycrystem/pycrystem | doc/demos/05 Simulate Data - Strain Mapping.ipynb | gpl-3.0 | %matplotlib inline
import pyxem as pxm
import numpy as np
import hyperspy.api as hs
import diffpy.structure
from matplotlib import pyplot as plt
from pyxem.generators.indexation_generator import IndexationGenerator
from diffsims.generators.diffraction_generator import DiffractionGenerator
"""
Explanation: Strain Mapping
This tutorial demonstrates different routes to obtain strain maps from scanning electron diffraction data.
The code functionality is illustrated using synthetic data, which is first generated using pyxem. This synthetic data represents a simple cubic crystal that is distorted to a tetragonal structure. The intention is for this to provide an easy-to-understand illustration of the code functionality rather than to model any physical system.
This functionaility has been checked to run in pyxem-0.13.0 (Jan 2021). Bugs are always possible, do not trust the code blindly, and if you experience any issues please report them here: https://github.com/pyxem/pyxem-demos/issues
Contents
<a href='#gen'> Setting up & Creating Synthetic Data</a>
<a href='#aff'> Image Affine Transform Based Mapping</a>
<a href='#vec'> Vector Based Mapping</a>
<a id='gen'></a>
1. Setting up & Creating Synthetic Data
Import pyxem and other required libraries
End of explanation
"""
latt = diffpy.structure.lattice.Lattice(3,3,3,90,90,90)
atom = diffpy.structure.atom.Atom(atype='Ni',xyz=[0,0,0],lattice=latt)
structure = diffpy.structure.Structure(atoms=[atom],lattice=latt)
"""
Explanation: Define a structure for the creation of synthetic data
End of explanation
"""
ediff = DiffractionGenerator(300.)
diffraction = ediff.calculate_ed_data(structure,
reciprocal_radius=5.,
max_excitation_error=0.025,
with_direct_beam=False)
"""
Explanation: Simulate an electron diffraction pattern
End of explanation
"""
diffraction.plot()
"""
Explanation: check we have some spots
End of explanation
"""
pattern = diffraction.get_diffraction_pattern(128,5)
plt.imshow(pattern)
"""
Explanation: and that they play nice with our "detector" configuration
End of explanation
"""
diffraction.calibration = 1e-2
pattern = diffraction.get_diffraction_pattern(128,5)
plt.imshow(pattern)
"""
Explanation: The reason this looks "off" is that we haven't calibrated our pattern, once we do so we get a nice clean pattern
End of explanation
"""
latt = diffpy.structure.lattice.Lattice(3+0.12,3+0.12,3,90,90,90)
atom = diffpy.structure.atom.Atom(atype='Ni',xyz=[0,0,0],lattice=latt)
structure_d = diffpy.structure.Structure(atoms=[atom],lattice=latt)
diffractiond = ediff.calculate_ed_data(structure_d, reciprocal_radius=5.,
max_excitation_error=0.025,
with_direct_beam=False)
diffractiond.calibration = 1e-2
patternd = diffractiond.get_diffraction_pattern(128,5)
"""
Explanation: Define a distorted structure and simulate diffraction
End of explanation
"""
dp = pxm.signals.ElectronDiffraction2D((np.asarray([[pattern,patternd],[pattern,pattern]])))
x_l = []
for x in [0, 0, -0.01, 0.02]:
x_s = np.eye(3)
x_s[0,0] += x
x_l.append(x_s)
angles = hs.signals.Signal2D(np.asarray(x_l).reshape(2,2,3,3))
dp = dp.apply_affine_transformation(D=angles,order=1,inplace=False)
dp.set_diffraction_calibration(1)
"""
Explanation: Copy the data and stitch patterns together with distortions applied to the patterns to make a 2x2 map
End of explanation
"""
dp.plot(cmap='inferno')
"""
Explanation: Plot the synthetic data to visualise distortions to be mapped
End of explanation
"""
from pyxem.generators.subpixelrefinement_generator import SubpixelrefinementGenerator
from pyxem.signals.tensor_field import *
from pyxem.generators.displacement_gradient_tensor_generator import *
"""
Explanation: <a id='vec'></a>
2. Vector Based Mapping
Import pyxem modules for vector based strain mapping
End of explanation
"""
dp.plot()
x_peak = [24,0]
y_peak = [0,-42]
"""
Explanation: Finding the two peaks to be used for strain mapping
End of explanation
"""
spg = SubpixelrefinementGenerator(dp, np.asarray([x_peak,y_peak]))
Vs = spg.center_of_mass_method(20)
"""
Explanation: Determine peak positions to subpixel precision
End of explanation
"""
D = get_DisplacementGradientMap(hs.signals.Signal2D(Vs), Vs.data[0,0])
# The warnings in this cell and the next one can be ignored and they will be removed in the next patch version
# see https://github.com/pyxem/pyxem/issues/716
strain_map = D.get_strain_maps()
strain_map.plot(cmap='seismic',vmax=0.04,vmin=-0.04)
"""
Explanation: Compare distorted and undistorted diffraction vectors to obtain a strain map
End of explanation
"""
|
psychemedia/parlihacks | notebooks/Text Scraping - Notes.ipynb | mit | from parse import parse
bigtext = '''\
From February 2016, as an author, payments from Head of Zeus Publishing; \
a client of Averbrook Ltd. Address: 45-47 Clerkenwell Green London EC1R 0HT, via Sheil Land, 52 Doughty Street. \
London WC1N 2LS. From October 2016 until July 2018, I will receive a regular payment \
of £13,000 per month (previously £11,000). Hours: 12 non-consecutive hrs per week. \
Any additional payments are listed below. (Updated 20 January 2016, 14 October 2016 and 2 March 2018)'''
#Extract the sentence containing the update dates
parse('{}(Updated {updated})', bigtext)['updated']
#Extract the phrase describing the hours
parse('{}Hours: {hours}.{}', bigtext)['hours']
"""
Explanation: Text Scraping
One of the things I learned early on about scraping web pages (often referred to as "screen scraping") is that it often amounts to trying to recreate databases that have been re-presented as web pages using HTML templates. For example:
display a database table as an HTML table in a web page;
display each row of a database as a templated HTML page.
The aim of the scrape in these cases might be as simple as pulling the table from the page and representing it as a dataframe, or trying to reverse engineer the HTML template that converts data to HTML into something that can extract the data from the HTML back as a row in a corresponding data table.
In the latter case, the scrape may proceed in a couple of ways. For example:
by trying to identify structural HTML tag elements that contain recognisable data items, retrieving the HTML tag element, then extracting the data value;
parsing the recognisable literal text displayed on the web page and trying to extract data items based on that (i.e. ignore the HTML structural elements and go straight for the extracted text). For an example of this sort of parsing, see the r1chardj0n3s/parse Python package as applied to text pulled from a page using something like the kennethreitz/requests-html package.
In more general cases, however, such as when trying to abstract meaningful information from arbitrary, natural language, texts, we need to up our game and start to analyse the texts as natural language texts.
Entity Extraction
As an example, consider the following text:
From February 2016, as an author, payments from Head of Zeus Publishing; a client of Averbrook Ltd. Address: 45-47 Clerkenwell Green London EC1R 0HT, via Sheil Land, 52 Doughty Street. London WC1N 2LS. From October 2016 until July 2018, I will receive a regular payment of £13,000 per month (previously £11,000). Hours: 12 non-consecutive hrs per week. Any additional payments are listed below. (Updated 20 January 2016, 14 October 2016 and 2 March 2018)
To a human reader, we can identify various structural patterns, as well as parsing the natural language sentences.
Let's start with some of the structural patterns:
End of explanation
"""
#Import the spacy package
import spacy
#The package parses lanuage according to different statistically trained models
#Let's load in the basic English model:
nlp = spacy.load('en')
#Generate a version of the text annotated using features detected by the model
doc = nlp(bigtext)
"""
Explanation: There also look to be sentences that might be standard sentences, such as Any additional payments are listed below.
From Web Scraping to Text-Scraping Using Natural Language Processing
Within the text are things that we might recognise as company names, dates, or addresses. Entity recognition refers to a natural language processing technique that attempts to extract words that describe "things", that is, entities, as well as identifying what sorts of "thing", or entity, they are.
One powerful Python natural language processing package, spacy, has an entity recognition capability. Let's see how to use it and what sort of output it produces:
End of explanation
"""
list(doc.sents)
ents = list(doc.ents)
entTypes = []
for entity in ents:
entTypes.append(entity.label_)
print(entity, '::', entity.label_)
for entType in set(entTypes):
print(entType, spacy.explain(entType))
"""
Explanation: The parsed text is annotated in a variety of ways.
For example, we can directly access all the sentences in the original text:
End of explanation
"""
for token in doc[:15]:
print('::'.join([token.text, token.ent_type_,token.ent_iob_]) )
"""
Explanation: We can also look at each of the tokens in text and identify whether it is part of a entity, and if so, what sort. The .iob_ attributes identifies O as not part of an entity, B as the first token in an entity, and I as continuing part of an entity.
End of explanation
"""
from dateutil import parser as dtparser
[(d, dtparser.parse(d.string)) for d in ents if d.label_ == 'DATE']
#see also https://github.com/akoumjian/datefinder
#datefinder - Find dates inside text using Python and get back datetime objects
"""
Explanation: Looking at the extracted entities, we see we get some good hits:
Averbrook Ltd. is an ORG;
20 January 2016 and 14 October 2016 are both instances of a DATE
Some near misses:
Zeus Publishing isn't a PERSON, although we might see why it has been recognised as such. (Could we overlay the model with an additional mapping of if PERSON and endswith.in(['Publishing', 'Holdings']) -> ORG ?)
And some things that are mis-categorised:
52 Doughty Street isn't really meaningful as a QUANTITY.
Several things we might usefully want to categorise - such as a UK postcode, for example, which might be useful in and of itself, or when helping us to identify an address - is not recognised as an entity.
Things recognised as dates we might want to then further parse as date object types:
End of explanation
"""
for token in doc[:15]:
print(token, '::', token.shape_)
"""
Explanation: Token Shapes
As well as indentifying entities, spacy analyses texts at several othr levels. One such level of abstraction is the "shape" of each token. This identifies whether or not a character is an upper or lower case alphabetic character, a digit, or a punctuation character (which appears as itself):
End of explanation
"""
[pc.shape_ for pc in nlp('MK7 6AA, SW1A 1AA, N7 6BB')]
"""
Explanation: Scraping a Text Based on Its Shape Structure And Adding New Entity Types
The "shape" of a token provides an additional structural item that we might be able to make use of in scrapers of the raw text.
For example, writing an efficient regular expression to identify a UK postcode can be a difficult task, but we can start to cobble one together from the shapes of different postcodes written in "standard" postcode form:
End of explanation
"""
from spacy.matcher import Matcher
nlp = spacy.load('en')
matcher = Matcher(nlp.vocab)
matcher.add('POSTCODE', None,
[{'SHAPE':'XXdX'}, {'SHAPE':'dXX'}],
[{'SHAPE':'XXd'}, {'SHAPE':'dXX'}],
[{'SHAPE':'Xd'}, {'SHAPE':'dXX'}])
"""
Explanation: We can define a matcher function that will identify the tokens in a document that match a particular ordered combination of shape patterns.
For example, the postcode like things described above have the shapes:
XXd dXX
XXdX dXX
Xd dXX
We can use these structural patterns to identify token pairs as possible postcodes.
End of explanation
"""
pcdoc = nlp('pc is WC1N 4CC okay, as is MK7 4AA and Sir James Smith and Lady Jane Grey are presumably persons.')
matches = matcher(pcdoc)
#See what we matched, and let's see what entities we have detected
print('Matches: {}\nEntities: {}'.format([pcdoc[m[1]:m[2]] for m in matches], [(m,m.label_) for m in pcdoc.ents]))
"""
Explanation: Let's test that:
End of explanation
"""
##Define a POSTCODE as a new entity type by adding matched postcodes to the doc.ents
#https://stackoverflow.com/a/47799669
nlp = spacy.load('en')
matcher = Matcher(nlp.vocab)
def add_entity_label(matcher, doc, i, matches):
    """Matcher on-match callback: record the i-th match as a document entity.

    spaCy invokes this with the matcher instance, the parsed doc, the index of
    the current match, and the full list of (match_id, start, end) tuples; the
    matched span is appended to ``doc.ents`` labelled with its match id.
    """
    doc.ents = doc.ents + (tuple(matches[i]),)
#Recognise postcodes from different shapes
matcher.add('POSTCODE', add_entity_label, [{'SHAPE': 'XXdX'},{'SHAPE':'dXX'}], [{'SHAPE':'XXd'},{'SHAPE':'dXX'}])
pcdoc = nlp('pc is WC1N 4CC okay, as is MK7 4AA and James Smith is presumably a person')
matches = matcher(pcdoc)
print('Matches: {}\nEntities: {}'.format([pcdoc[m[1]:m[2]] for m in matches], [(m,m.label_) for m in pcdoc.ents]))
"""
Explanation: Adding a new entity type with a matcher callback
The matcher seems to have matched the postcodes, but is not identifying them as entities. (We also note that the entity matcher has missed the "Sir" title. In some cases, it might also match a postcode as a person.)
To add the matched items to the entity list, we need to add a callback function to the matcher.
End of explanation
"""
bigtext
#Generate base tagged doc
doc = nlp(bigtext)
#Run postcode tagger over the doc
_ = matcher(doc)
"""
Explanation: Let's put those pieces together more succinctly:
End of explanation
"""
from spacy import displacy
displacy.render(doc, jupyter=True, style='ent')
"""
Explanation: The tagged document should now include POSTCODE entities. One of the easiest ways to check the effectiveness of a new entity tagger is to check the document with recognised entities visualised within it.
The displacy package has a Jupyter enabled visualiser for doing just that.
End of explanation
"""
import pandas as pd
mpdata=pd.read_csv('members_mar18.csv')
mpdata.head(5)
"""
Explanation: Matching A Large Number of Phrases
If we have a large number of phrases that are examples of a particular (new) entity type, we can match them using a PhraseMatcher.
For example, suppose we have a table of MP data:
End of explanation
"""
term_list = mpdata['list_name'].tolist()
term_list[:5]
"""
Explanation: From this, we can extract a list of MP names, albeit in reverse word order.
End of explanation
"""
from spacy.matcher import PhraseMatcher
nlp = spacy.load('en')
matcher = PhraseMatcher(nlp.vocab)
patterns = [nlp(text) for text in term_list]
matcher.add('MP', add_entity_label, *patterns)
"""
Explanation: If we wanted to match those names as "MP" entities, we could use the following recipe to add an MP entity type that will be returned if any of the MP names are matched:
End of explanation
"""
doc = nlp("The MPs were Adams, Nigel, Afolami, Bim and Abbott, Ms Diane.")
matches = matcher(doc)
displacy.render(doc, jupyter=True, style='ent')
"""
Explanation: Let's test that new entity on a test string:
End of explanation
"""
import re
#https://stackoverflow.com/a/164994/454773
regex_ukpc = r'([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y][0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]))))\s?[0-9][A-Za-z]{2})'
#Based on https://spacy.io/usage/linguistic-features
nlp = spacy.load('en')
doc = nlp("The postcodes were MK1 6AA and W1A 1AA.")
for match in re.finditer(regex_ukpc, doc.text):
start, end = match.span() # get matched indices
entity = doc.char_span(start, end, label='POSTCODE') # create Span from indices
doc.ents = list(doc.ents) + [entity]
entity.merge()
displacy.render(doc, jupyter=True, style='ent')
"""
Explanation: Matching a Regular Expression
Sometimes we may want to use a regular expression as an entity detector. For example, we might want to tighten up the postcode entity detectio by using a regular expression, rather than shape matching.
End of explanation
"""
nlp('pc is WC1N 4CC okay, as is MK7 4AA and Sir James Smith and Lady Jane Grey are presumably persons').ents
"""
Explanation: Updating the training of already existing Entities
We note previously that the matcher was missing the "Sir" title on matched persons.
End of explanation
"""
# training data
# Gold NER examples: each tuple pairs a raw text with a dict whose
# 'entities' list holds (start_char, end_char, label) offsets into that text.
TRAIN_DATA = [
# 'Sir John Smith' occupies characters 14-28 (end-exclusive), title included.
('Received from Sir John Smith last week.', {
'entities': [(14, 28, 'PERSON')]
}),
# Second example annotated as characters 0-18, again keeping the 'Sir' title.
('Sir Richard Jones is another person', {
'entities': [(0, 18, 'PERSON')]
})
]
"""
Explanation: Let's see if we can update the training of the model so that it does recognise the "Sir" title as part of a person's name.
We can do that by creating some new training data and using it to update the model. The entities dict identifies the index values in the test string that delimit the entity we want to extract.
End of explanation
"""
import random
#model='en' #'en_core_web_sm'
#nlp = spacy.load(model)
cycles=20
optimizer = nlp.begin_training()
for i in range(cycles):
random.shuffle(TRAIN_DATA)
for txt, annotations in TRAIN_DATA:
nlp.update([txt], [annotations], sgd=optimizer)
nlp('pc is WC1N 4CC okay, as is MK7 4AA and Sir James Smith and Lady Jane Grey are presumably persons').ents
"""
Explanation: In this case, we are going to let spacy learn its own patterns, as a statistical model, that will - if the learning pays off correctly - identify things like "Sir Bimble Bobs" as a PERSON entity.
End of explanation
"""
#Find multiple matches using .find()
#https://stackoverflow.com/a/4665027/454773
def _find_all(string, substring):
    """Yield the start index of each non-overlapping occurrence of *substring*.

    Raises
    ------
    ValueError
        If *substring* is empty: ``str.find('', start)`` returns ``start`` and
        the advance would be zero, so the original loop would never terminate.
    """
    if not substring:
        raise ValueError("substring must be non-empty")
    start = 0
    step = len(substring)
    while True:
        start = string.find(substring, start)
        if start == -1:
            return
        yield start
        # Skip past this match so reported matches never overlap.
        start += step

def find_all(string, substring):
    """Return a list of start indices of non-overlapping matches of *substring*."""
    return list(_find_all(string, substring))
#Find multiple matches using a regular expression
#https://stackoverflow.com/a/4664889/454773
import re
def refind_all(string, substring):
    """Return start indices of non-overlapping literal matches of *substring*.

    The pattern is escaped with ``re.escape`` so regex metacharacters in
    *substring* (dots, parentheses, etc., which occur in the notebook's
    address and amount strings) are matched literally, keeping the result
    consistent with ``find_all``.
    """
    return [m.start() for m in re.finditer(re.escape(substring), string)]
txt = 'This is a string.'
substring = 'is'
print( find_all(txt, substring) )
print( refind_all(txt, substring) )
"""
Explanation: One of the things that can be a bit fiddly is generating the training strings. We can produce a little utility function that will help us create a training pattern by identifying the index value(s) associated with a particular substring, that we wish to identify as an example of a particular entity type, inside a text string.
The first thing we need to do is find the index values within a string that show where a particular substring can be found. The Python find() and index() methods will find the first location of a substring in a string. However, where a substring appears several times in a sring, we need a new function to identify all the locations. There are several ways of doing this...
End of explanation
"""
def trainingTupleBuilder(string, substring, typ, entities=None):
    """Build a spaCy NER training tuple for every occurrence of *substring*.

    Returns ``(string, {'entities': [(start, end, typ), ...]})``, appending to
    an existing ``entities`` dict when one is supplied instead of creating a
    fresh one.
    """
    if entities is None:
        entities = {'entities': []}
    span = len(substring)
    # One (start_char, end_char, label) annotation per non-overlapping match.
    entities['entities'].extend(
        (start, start + span, typ) for start in refind_all(string, substring)
    )
    return (string, entities)
#('Received from Sir John Smith last week.', {'entities': [(14, 28, 'PERSON')]})
trainingTupleBuilder('Received from Sir John Smith last week.','Sir John Smith','PERSON')
"""
Explanation: We can use either of these functions to find the location of a substring in a string, and then use these index values to help us create our training data.
End of explanation
"""
TRAIN_DATA = []
TRAIN_DATA.append(trainingTupleBuilder("He lives at 27, Oswaldtwistle Way, Birmingham",'27, Oswaldtwistle Way, Birmingham','B-ADDRESS'))
TRAIN_DATA.append(trainingTupleBuilder("Payments from Boondoggle Limited, 377, Hope Street, Little Village, Halifax. Received: October, 2017",'377, Hope Street, Little Village, Halifax','B-ADDRESS'))
TRAIN_DATA
"""
Explanation: Training a Simple Model to Recognise Addresses
As well as extracting postcodes as entities, could we also train a simple model to extract addresses?
End of explanation
"""
#https://spacy.io/usage/training
def spacytrainer(model=None, output_dir=None, n_iter=100, debug=False, train_data=None):
    """Load (or create) a spaCy model and train its named entity recognizer.

    Parameters
    ----------
    model : str, spaCy Language, or None
        A model name to load, an already-instantiated pipeline, or None for
        a blank English model.
    output_dir : str or None
        When given, the trained model is saved there and reloaded as a
        round-trip check.
    n_iter : int
        Number of training passes over the data.
    debug : bool
        When True, print per-iteration losses and per-example predictions.
    train_data : list of (text, {'entities': [...]}) tuples, or None
        Training examples; defaults to the module-level TRAIN_DATA so
        existing callers keep working.

    Returns
    -------
    The trained spaCy Language object.
    """
    if train_data is None:
        train_data = TRAIN_DATA  # fall back to the notebook-global examples
    if model is not None:
        if isinstance(model, str):
            nlp = spacy.load(model)  # load existing spaCy model by name
            print("Loaded model '%s'" % model)
        else:
            # Assume an already-loaded nlp pipeline was passed in.
            nlp = model
    else:
        nlp = spacy.blank('en')  # create blank Language class
        print("Created blank 'en' model")
    # Ensure the pipeline has an NER component we can add labels to;
    # nlp.create_pipe works for built-ins that are registered with spaCy.
    if 'ner' not in nlp.pipe_names:
        ner = nlp.create_pipe('ner')
        nlp.add_pipe(ner, last=True)
    else:
        ner = nlp.get_pipe('ner')
    # Register every entity label that appears in the training annotations.
    for _, annotations in train_data:
        for ent in annotations.get('entities'):
            ner.add_label(ent[2])
    # Disable the other pipeline components so only the NER is updated.
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
    with nlp.disable_pipes(*other_pipes):
        optimizer = nlp.begin_training()
        for itn in range(n_iter):
            random.shuffle(train_data)
            losses = {}
            for text, annotations in train_data:
                nlp.update(
                    [text],          # batch of texts
                    [annotations],   # batch of annotations
                    drop=0.5,        # dropout - make it harder to memorise data
                    sgd=optimizer,   # callable to update weights
                    losses=losses)
            if debug:
                print(losses)
    # Optionally show what the trained model predicts on the training texts.
    if debug:
        for text, _ in train_data:
            doc = nlp(text)
            print('Entities', [(ent.text, ent.label_) for ent in doc.ents])
            print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])
    # Save the model to disk and verify it round-trips.
    if output_dir is not None:
        # Local import: Path was never imported at module level, so the save
        # branch of the original raised NameError when output_dir was given.
        from pathlib import Path
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
        nlp.to_disk(output_dir)
        print("Saved model to", output_dir)
        # Test the saved model.
        print("Loading from", output_dir)
        nlp2 = spacy.load(output_dir)
        for text, _ in train_data:
            doc = nlp2(text)
            print('Entities', [(ent.text, ent.label_) for ent in doc.ents])
            print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])
    return nlp
"""
Explanation: The B- prefix identifies the entity as a multi-token entity.
End of explanation
"""
nlp = spacytrainer('en')
#See if we can identify the address
addr_doc = nlp(text)
displacy.render(addr_doc , jupyter=True, style='ent')
"""
Explanation: Let's update the en model to include a really crude address parser based on the two lines of training data described above.
End of explanation
"""
tags = []
for token in doc[:15]:
print(token, '::', token.pos_, '::', token.tag_)
tags.append(token.tag_)
"""
Explanation: Parts of Speech (POS)
As well as recognising different types of entity, which may be identified across several different words, the spacy parser also marks up each separate word (or token) as a particular "part-of-speech" (POS), such as a noun, verb, or adjective.
Parts of speech are identified as .pos_ or .tag_ token attributes.
End of explanation
"""
for tag in set(tags):
print(tag, '::', spacy.explain(tag))
"""
Explanation: An explain() function describes each POS type in natural language terms:
End of explanation
"""
for chunk in doc.noun_chunks:
print(' :: '.join([chunk.text, chunk.root.text, chunk.root.dep_,
chunk.root.head.text]))
"""
Explanation: We can also get a list of "noun chunks" identified in the text, as well as other words they relate to in a sentence:
End of explanation
"""
import textacy
list(textacy.extract.pos_regex_matches(nlp(text),r'<NOUN> <ADP> <PROPN|ADP>+'))
textacy.constants.POS_REGEX_PATTERNS
xx='A sum of £2000-3000 last or £2,000 or £2000-£3000 or £2,000-£3,000 year'
for t in nlp(xx):
print(t,t.tag_, t.pos_)
for e in nlp(xx).ents:
print(e, e.label_)
list(textacy.extract.pos_regex_matches(nlp('A sum of £2000-3000 last or £2,000 or £2000-£3000 or £2,000-£3,000 year'),r'<SYM><NUM><SYM>?<NUM>?'))
"""
Explanation: Scraping a Text Based on Its POS Structure - textacy
As well as the basic spacy functionality, packages exist that build on spacy to provide further tools for working with abstractions identified using spacy.
For example, the textacy package provides a way of parsing sentences using regular expressions defined over (Ontonotes5?) POS tags:
End of explanation
"""
#define approx amount eg £10,000-£15,000 or £10,000-15,000
parse('{}£{a}-£{b:g}{}','eg £10,000-£15,000 or £14,000-£16,000'.replace(',',''))
"""
Explanation: If we can define appropriate POS pattern, we can extract terms from an arbitrary text based on that pattern, an approach that is far more general than trying to write a regular expression pattern matcher over just the raw text.
End of explanation
"""
nlp = spacy.load('en')
matcher = Matcher(nlp.vocab)
matcher.add('POSTCODE', add_entity_label, [{'SHAPE': 'XXdX'},{'SHAPE':'dXX'}], [{'SHAPE':'XXd'},{'SHAPE':'dXX'}])
matcher.add('ADDRESS', add_entity_label,
[{'POS':'NUM','OP':'+'},{'POS':'PROPN','OP':'+'}, {'ENT_TYPE':'POSTCODE', 'OP':'+'}],
[{'ENT_TYPE':'GPE','OP':'+'}, {'ENT_TYPE':'POSTCODE', 'OP':'+'}])
addr_doc = nlp(text)
matcher(addr_doc)
displacy.render(addr_doc , jupyter=True, style='ent')
for m in matcher(addr_doc):
print(addr_doc[m[1]:m[2]])
print([(e, e.label_) for e in addr_doc.ents])
"""
Explanation: More Complex Matching Rules
Matchers can be created over a wide range of attributes (docs), including POS tags and entity labels.
For example, we can start trying to build an address tagger by looking for things that end with a postcode.
End of explanation
"""
for sent in nlp(text).sents:
print(sent,'\n')
for token in sent:
print(token, ': ', str(list(token.children)))
print()
"""
Explanation: In this case, we note that the visualiser cannot cope with rendering multiple entity types over one or more words. In the above example, the POSTCODE entitites are highlighted, but we note from the matcher that ADDRESS ranges are also identified that extend across entities defined over fewer terms.
Visualising - displaCy
We can look at the structure of a text by printing out the child elements associated with each token in a sentence:
End of explanation
"""
from spacy import displacy
displacy.render(doc, jupyter=True,style='dep')
displacy.render(doc, jupyter=True,style='dep',options={'distance':85, 'compact':True})
"""
Explanation: However, the displaCy toolset, included as part of spacy, provides a more appealing way of visualising parsed documents in two different ways:
as a dependency graph, showing POS tags for each token and how they relate to each other;
as a text display with extracted entities highlighted.
The dependency graph identifies POS tags as well as how tokens are related in natural language grammatical phrases:
End of explanation
"""
displacy.render(pcdoc, jupyter=True,style='ent')
displacy.render(doc, jupyter=True,style='ent')
"""
Explanation: We can also use displaCy to highlight, inline, the entities extracted from a text.
End of explanation
"""
# Load the MP reference data and key it by the MP's display name
# (list_name) so records can be looked up by name below.
# Use orient='records': the abbreviated 'record' is deprecated in
# pandas and removed in pandas 2.0.
mpdata = pd.read_csv('members_mar18.csv')
tmp = mpdata.to_dict(orient='records')
mpdatadict = {k['list_name']: k for k in tmp}
#via https://spacy.io/usage/processing-pipelines
mpdata=pd.read_csv('members_mar18.csv')
"""Example of a spaCy v2.0 pipeline component to annotate MP record with MNIS data"""
from spacy.tokens import Doc, Span, Token
class RESTMPComponent(object):
    """spaCy v2.0 pipeline component that annotates MP entities with MNIS data.

    MP names (as listed in members_mar18.csv) are matched with a
    PhraseMatcher; matched spans receive the MP entity label and each
    matched token carries the MP's MNIS id, constituency and party.
    """
    name = 'mp_annotator'  # component name, will show up in the pipeline

    def __init__(self, nlp, label='MP'):
        """Initialise the pipeline component.

        The shared nlp instance is used to initialise the matcher with the
        shared vocab, get the label ID and generate Doc objects as phrase
        match patterns.
        """
        # Get MP data. Use orient='records': the abbreviated 'record' is
        # deprecated in pandas and removed in pandas 2.0.
        mpdata = pd.read_csv('members_mar18.csv')
        mpdatadict = mpdata.to_dict(orient='records')
        # Convert MP data to a dict keyed by MP name
        self.mpdata = {k['list_name']: k for k in mpdatadict}
        self.label = nlp.vocab.strings[label]  # get entity label ID
        # Set up the PhraseMatcher with Doc patterns for each MP name
        patterns = [nlp(c) for c in self.mpdata.keys()]
        self.matcher = PhraseMatcher(nlp.vocab)
        self.matcher.add('MPS', None, *patterns)
        # Register attributes on the Token. We'll be overwriting these based
        # on the matches, so we only set default values, not getters.
        # NOTE: spaCy requires a default (or getter/method) when registering
        # an extension; the original code registered mnis_id, constituency
        # and party with no arguments, which raises ValueError in spaCy v2.
        Token.set_extension('is_mp', default=False)
        Token.set_extension('mnis_id', default=None)
        Token.set_extension('constituency', default=None)
        Token.set_extension('party', default=None)
        # Register attributes on Doc and Span via a getter that checks if
        # one of the contained tokens has is_mp == True.
        Doc.set_extension('is_mp', getter=self.is_mp)
        Span.set_extension('is_mp', getter=self.is_mp)

    def __call__(self, doc):
        """Apply the pipeline component on a Doc object and modify it if
        matches are found. Return the Doc, so it can be processed by the
        next component in the pipeline, if available.
        """
        matches = self.matcher(doc)
        spans = []  # keep the spans for later so we can merge them afterwards
        for _, start, end in matches:
            # Generate Span representing the entity & set label
            entity = Span(doc, start, end, label=self.label)
            spans.append(entity)
            # Set custom attributes on each token of the entity.
            # Can be extended with other data associated with the MP.
            for token in entity:
                token._.set('is_mp', True)
                token._.set('mnis_id', self.mpdata[entity.text]['member_id'])
                token._.set('constituency', self.mpdata[entity.text]['constituency'])
                token._.set('party', self.mpdata[entity.text]['party'])
            # Overwrite doc.ents and add entity - be careful not to replace!
            doc.ents = list(doc.ents) + [entity]
        for span in spans:
            # Iterate over all spans and merge them into one token. This is
            # done after setting the entities - otherwise, it would cause
            # mismatched indices!
            span.merge()
        return doc  # don't forget to return the Doc!

    def is_mp(self, tokens):
        """Getter for Doc and Span attributes. Return True if one of the
        tokens is an MP."""
        return any([t._.get('is_mp') for t in tokens])
# For simplicity, we start off with only the blank English Language class
# and no model or pre-defined pipeline loaded.
nlp = spacy.load('en')
rest_mp = RESTMPComponent(nlp) # initialise component
nlp.add_pipe(rest_mp) # add it to the pipeline
doc = nlp(u"Some text about MPs Abbott, Ms Diane and Afriyie, Adam")
print('Pipeline', nlp.pipe_names) # pipeline contains component name
print('Doc has MPs', doc._.is_mp) # Doc contains MPs
for token in doc:
if token._.is_mp:
print(token.text, '::', token._.constituency,'::', token._.party,
'::', token._.mnis_id) # MP data
print('Entities', [(e.text, e.label_) for e in doc.ents]) # entities
"""
Explanation: Extending Entities
eg add a flag to say a person is an MP
End of explanation
"""
|
dolittle007/dolittle007.github.io | notebooks/GLM-robust.ipynb | gpl-3.0 | %matplotlib inline
import pymc3 as pm
import matplotlib.pyplot as plt
import numpy as np
import theano
"""
Explanation: GLM: Robust Linear Regression
Author: Thomas Wiecki
This tutorial first appeared as a post in a small series on Bayesian GLMs on my blog:
The Inference Button: Bayesian GLMs made easy with PyMC3
This world is far from Normal(ly distributed): Robust Regression in PyMC3
The Best Of Both Worlds: Hierarchical Linear Regression in PyMC3
In this blog post I will write about:
How a few outliers can largely affect the fit of linear regression models.
How replacing the normal likelihood with Student T distribution produces robust regression.
How this can easily be done with PyMC3 and its new glm module by passing a family object.
This is the second part of a series on Bayesian GLMs (click here for part I about linear regression). In this prior post I described how minimizing the squared distance of the regression line is the same as maximizing the likelihood of a Normal distribution with the mean coming from the regression line. This latter probabilistic expression allows us to easily formulate a Bayesian linear regression model.
This worked splendidly on simulated data. The problem with simulated data though is that it's, well, simulated. In the real world things tend to get more messy and assumptions like normality are easily violated by a few outliers.
Lets see what happens if we add some outliers to our simulated data from the last post.
Again, import our modules.
End of explanation
"""
size = 100
true_intercept = 1
true_slope = 2
x = np.linspace(0, 1, size)
# y = a + b*x
true_regression_line = true_intercept + true_slope * x
# add noise
y = true_regression_line + np.random.normal(scale=.5, size=size)
# Add outliers
x_out = np.append(x, [.1, .15, .2])
y_out = np.append(y, [8, 6, 9])
data = dict(x=x_out, y=y_out)
"""
Explanation: Create some toy data but also add some outliers.
End of explanation
"""
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, xlabel='x', ylabel='y', title='Generated data and underlying model')
ax.plot(x_out, y_out, 'x', label='sampled data')
ax.plot(x, true_regression_line, label='true regression line', lw=2.)
plt.legend(loc=0);
"""
Explanation: Plot the data together with the true regression line (the three points in the upper left corner are the outliers we added).
End of explanation
"""
with pm.Model() as model:
pm.glm.GLM.from_formula('y ~ x', data)
trace = pm.sample(2000, njobs=2)
"""
Explanation: Robust Regression
Lets see what happens if we estimate our Bayesian linear regression model using the glm() function as before. This function takes a Patsy string to describe the linear model and adds a Normal likelihood by default.
End of explanation
"""
plt.figure(figsize=(7, 5))
plt.plot(x_out, y_out, 'x', label='data')
pm.plot_posterior_predictive_glm(trace, samples=100,
label='posterior predictive regression lines')
plt.plot(x, true_regression_line,
label='true regression line', lw=3., c='y')
plt.legend(loc=0);
"""
Explanation: To evaluate the fit, I am plotting the posterior predictive regression lines by taking regression parameters from the posterior distribution and plotting a regression line for each (this is all done inside of plot_posterior_predictive_glm()).
End of explanation
"""
normal_dist = pm.Normal.dist(mu=0, sd=1)
t_dist = pm.StudentT.dist(mu=0, lam=1, nu=1)
x_eval = np.linspace(-8, 8, 300)
plt.plot(x_eval, theano.tensor.exp(normal_dist.logp(x_eval)).eval(), label='Normal', lw=2.)
plt.plot(x_eval, theano.tensor.exp(t_dist.logp(x_eval)).eval(), label='Student T', lw=2.)
plt.xlabel('x')
plt.ylabel('Probability density')
plt.legend();
"""
Explanation: As you can see, the fit is quite skewed and we have a fair amount of uncertainty in our estimate as indicated by the wide range of different posterior predictive regression lines. Why is this? The reason is that the normal distribution does not have a lot of mass in the tails and consequently, an outlier will affect the fit strongly.
A Frequentist would estimate a Robust Regression and use a non-quadratic distance measure to evaluate the fit.
But what's a Bayesian to do? Since the problem is the light tails of the Normal distribution we can instead assume that our data is not normally distributed but instead distributed according to the Student T distribution which has heavier tails as shown next (I read about this trick in "The Kruschke", aka the puppy-book; but I think Gelman was the first to formulate this).
Lets look at those two distributions to get a feel for them.
End of explanation
"""
with pm.Model() as model_robust:
family = pm.glm.families.StudentT()
pm.glm.GLM.from_formula('y ~ x', data, family=family)
trace_robust = pm.sample(2000, njobs=2)
plt.figure(figsize=(7, 5))
plt.plot(x_out, y_out, 'x')
pm.plot_posterior_predictive_glm(trace_robust,
label='posterior predictive regression lines')
plt.plot(x, true_regression_line,
label='true regression line', lw=3., c='y')
plt.legend();
"""
Explanation: As you can see, the probability of values far away from the mean (0 in this case) are much more likely under the T distribution than under the Normal distribution.
To define the usage of a T distribution in PyMC3 we can pass a family object -- T -- that specifies that our data is Student T-distributed (see glm.families for more choices). Note that this is the same syntax as R and statsmodels use.
End of explanation
"""
|
guruucsd/EigenfaceDemo | python/Gini coefficient.ipynb | mit | target=array([1,4,8,5])
output=array([1,8,4,5])
"""
Explanation: Gini coefficient
Gini coefficient is a measure of statistical dispersion. For the Kaggle competition, the normalized Gini coefficient is used as a measure of comparing how much the ordering of the model prediction matches the actual output. The magnitudes of the prediction do matter, but not in the same way they do in regular regressions.
The Gini coefficient is calculated as follows. As an example, let's say we have the following target values and model output (predictions):
End of explanation
"""
sort_index=argsort(-output) # Because we want to sort from largest to smallest
print(sort_index)
"""
Explanation: In the above example, the prediction output is not perfect, because we have the 4 and 8 switched. Regardless, we first sort the output from the largest to the smallest and calculate the sort ordering:
End of explanation
"""
sorted_target=target[sort_index]
print(sorted_target)
"""
Explanation: Next, we sort the target values using this sorting order. Since the predicted order was incorrect, the target values are not going to be sorted by largest to the smallest.
End of explanation
"""
cumsum_target=cumsum(sorted_target)
print(cumsum_target)
cumsum_target_ratio=cumsum_target / asarray(target.sum(), dtype=float) # Convert to float type
print(cumsum_target_ratio)
"""
Explanation: Then we look at the cumulative sum, and divide by the total sum to get the proportion of the cumulative sum.
End of explanation
"""
xs=linspace(0, 1, len(cumsum_target_ratio) + 1)
plt.plot(xs, c_[xs, r_[0, cumsum_target_ratio]])
plt.gca().set_aspect('equal')
plt.gca().set_xlabel(r'% from left of array')
plt.gca().set_ylabel(r'% cumsum')
"""
Explanation: Let's plot cumsum_target_ratio:
End of explanation
"""
gini_coeff=(r_[0, cumsum_target_ratio] - xs).sum()
print(gini_coeff)
"""
Explanation: cumsum_target_ratio was plotted in green, whereas The line for $y = x$ was also plotted in blue. This line represents the random model prediction. If we had a large array of numbers, sorted it randomly and looked at the cumulative sum from the left, we would expect the cumulative sum to be about 10% of the total when we look at the number 10% from the left. In general, we would expect $x$ % of the cumulative sum total for the array element that is $x$ % from the left of the array.
Finally, the Gini coefficient is determined to be the "area" between the green and the blue lines: green values minus the blue line values. (This can also be negative in some places, as we see above; hence the quotation marks.)
End of explanation
"""
def gini_coeff(target, output):
    """Return the (unnormalized) Gini coefficient of `target` when its
    elements are ranked by descending model `output`.

    This is the signed area between the cumulative-sum curve of the
    reordered targets and the diagonal baseline of a random ordering.
    """
    # Rank the targets from the largest predicted value to the smallest.
    descending = argsort(-output)
    ordered = target[descending]
    # Proportion of the cumulative target sum, walking left to right.
    cum_ratio = cumsum(ordered) / asarray(target.sum(), dtype=float)
    # Baseline: a random ordering accumulates x% of the total at x% in.
    baseline = linspace(0, 1, len(cum_ratio) + 1)
    return (r_[0, cum_ratio] - baseline).sum()
print(gini_coeff(target, output))
"""
Explanation: For convenience, we collect the above in a function.
End of explanation
"""
print(gini_coeff(target, target))
"""
Explanation: Note that we can also calculate the Gini coefficient of a vector with itself. In this case, it returns the maximum value that can be achieved by any ordering of the same set of numbers:
End of explanation
"""
def normalized_gini(target, output):
    """Return the Gini coefficient of `output` against `target`, scaled
    by the best score achievable for `target` itself.

    A value of 1.0 means the prediction orders the targets perfectly.
    """
    best_possible = gini_coeff(target, target)
    return gini_coeff(target, output) / best_possible
print(normalized_gini(target, output))
"""
Explanation: Finally, the normalized Gini coefficient is defined as the ratio of Gini coefficient between the target and the prediction with respect to the maximum value achievable from the target values themselves:
End of explanation
"""
print(normalized_gini(target, target))
"""
Explanation: The normalized Gini coefficient has the maximum of 1, when the ordering is correct.
End of explanation
"""
target=array([1,4,8,5])
output2=array([5,8,4,1])
print(normalized_gini(target, output2))
"""
Explanation: The model prediction is considered better the closer it is to 1. It appears that this number can become negative, though, if the prediction is very bad (the opposite ordering, for example):
End of explanation
"""
target=array([1,4,8,5])
output3=array([10,80,40,50])
output4=array([0,3,1,2])
print(normalized_gini(target, output3))
print(normalized_gini(target, output4))
"""
Explanation: This measure is insensitve to the magnitudes:
End of explanation
"""
# Two imperfect predictions of target_large: output_small gets all the
# small values wrong but the single large value right; output_large gets
# most small values right but misses the large one.
target_large = array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 9])
output_small = array([2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 8])  # All 1, 2 s are wrong, but got the largest number right
output_large = array([1, 2, 1, 2, 1, 2, 1, 2, 1, 6, 2])  # Got most 1, 2 s right, but missed the largest number
# RMSE is sqrt(mean(squared error)). The original applied sqrt
# element-wise before averaging, which actually computes the mean
# absolute error, not the RMSE.
print('output_small RMSE: %f' % sqrt(((target_large - output_small) ** 2).mean()))
print('output_large RMSE: %f' % sqrt(((target_large - output_large) ** 2).mean()))
print('output_small normalized Gini: %f' % normalized_gini(target_large, output_small))
print('output_large normalized Gini: %f' % normalized_gini(target_large, output_large))
"""
Explanation: However, because we are sorting from the largest to the smallest number (and looking at the ratio of the largest to the total), it is more important to predict the samples with large numbers.
To wit, here're two sets of predictions:
End of explanation
"""
|
evanmiltenburg/python-for-text-analysis | Assignments-colab/ASSIGNMENT_2.ipynb | apache-2.0 | %%capture
!wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/Data.zip
!wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/images.zip
!wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/Extra_Material.zip
!unzip Data.zip -d ../
!unzip images.zip -d ./
!unzip Extra_Material.zip -d ../
!rm Data.zip
!rm Extra_Material.zip
!rm images.zip
"""
Explanation: <a href="https://colab.research.google.com/github/cltl/python-for-text-analysis/blob/colab/Assignments-colab/ASSIGNMENT_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
End of explanation
"""
for number in range(4, 0, -1): # change 4 to 99 when you're done with debugging
print(number, 'bottles of beer on the wall,')
"""
Explanation: Assignment 2: Containers
Deadline: Tuesday, September 21, 2021 before 20:00
Please name your files:
ASSIGNMENT_2_FIRSTNAME_LASTNAME.ipynb
assignment2_utils.py
Please store the two files in a folder called ASSIGNMENT_2_FIRSTNAME_LASTNAME
Please zip your folder and please follow the following naming convention for the zip file: ASSIGNMENT_2_FIRSTNAME_LASTNAME.zip
Please submit your assignment on Canvas: Assignment 2
If you have questions about this topic, please contact us (cltl.python.course@gmail.com). Questions and answers will be collected on Piazza, so please check if your question has already been answered first.
In this block, we covered the following chapters:
- Chapter 05 - Core concepts of containers
- Chapter 06 - Lists
- Chapter 07 - Sets
- Chapter 08 - Comparison of lists and sets
- Chapter 09 - Looping over containers.
- Chapter 10 - Dictionaries
- Chapter 11 - Functions and scope
In this assignment, you will be asked to show what you have learned from the topics above!
Finding solutions online
Very often, you can find good solutions online. We encourage you to use online resources when you get stuck. However, please always try to understand the code you find and indicate that it is not your own. Use the following format to mark code written by someone else:
Taken from [link] [date]
[code]
###
Please use a similar format to indicate that you have worked with a classmate (e.g. mention the name instead of the link).
Please stick to this strategy for all course assignments.
Exercise 1: Beersong
99 Bottles of Beer is a traditional song in the United States and Canada. Write a python program that generates the lyrics to the song.
The song's simple lyrics are as follows:
99 bottles of beer on the wall,
99 bottles of beer.
Take one down, pass it around,
98 bottles of beer on the wall.
The same verse is repeated, each time with one fewer bottle. The song
is completed when the singer or singers reach zero. After the last bottle
is taken down and passed around, there is a special verse:
No more bottles of beer on the wall,
no more bottles of beer.
Go to the store and buy some more,
99 bottles of beer on the wall.
Notes:
Leave a blank line between verses.
Make sure that you print the singular form of "bottles" when the counter is at one.
Hint:
While debugging the program, start from a small number, and
change it to 100 when you are done (as shown below).
Use variables to prevent code repetition
You can use the following code snippet as a start:
End of explanation
"""
a_story = """In a far away kingdom, there was a river. This river was home to many golden swans. The swans spent most of their time on the banks of the river. Every six months, the swans would leave a golden feather as a fee for using the lake. The soldiers of the kingdom would collect the feathers and deposit them in the royal treasury.
One day, a homeless bird saw the river. "The water in this river seems so cool and soothing. I will make my home here," thought the bird.
As soon as the bird settled down near the river, the golden swans noticed her. They came shouting. "This river belongs to us. We pay a golden feather to the King to use this river. You can not live here."
"I am homeless, brothers. I too will pay the rent. Please give me shelter," the bird pleaded. "How will you pay the rent? You do not have golden feathers," said the swans laughing. They further added, "Stop dreaming and leave once." The humble bird pleaded many times. But the arrogant swans drove the bird away.
"I will teach them a lesson!" decided the humiliated bird.
She went to the King and said, "O King! The swans in your river are impolite and unkind. I begged for shelter but they said that they had purchased the river with golden feathers."
The King was angry with the arrogant swans for having insulted the homeless bird. He ordered his soldiers to bring the arrogant swans to his court. In no time, all the golden swans were brought to the King’s court.
"Do you think the royal treasury depends upon your golden feathers? You can not decide who lives by the river. Leave the river at once or you all will be beheaded!" shouted the King.
The swans shivered with fear on hearing the King. They flew away never to return. The bird built her home near the river and lived there happily forever. The bird gave shelter to all other birds in the river. """
print(a_story)
another_story = """Long time ago, there lived a King. He was lazy and liked all the comforts of life. He never carried out his duties as a King. "Our King does not take care of our needs. He also ignores the affairs of his kingdom." The people complained.
One day, the King went into the forest to hunt. After having wandered for quite sometime, he became thirsty. To his relief, he spotted a lake. As he was drinking water, he suddenly saw a golden swan come out of the lake and perch on a stone. "Oh! A golden swan. I must capture it," thought the King.
But as soon as he held his bow up, the swan disappeared. And the King heard a voice, "I am the Golden Swan. If you want to capture me, you must come to heaven."
Surprised, the King said, "Please show me the way to heaven." Do good deeds, serve your people and the messenger from heaven would come to fetch you to heaven," replied the voice.
The selfish King, eager to capture the Swan, tried doing some good deeds in his Kingdom. "Now, I suppose a messenger will come to take me to heaven," he thought. But, no messenger came.
The King then disguised himself and went out into the street. There he tried helping an old man. But the old man became angry and said, "You need not try to help. I am in this miserable state because of out selfish King. He has done nothing for his people."
Suddenly, the King heard the golden swan’s voice, "Do good deeds and you will come to heaven." It dawned on the King that by doing selfish acts, he will not go to heaven.
He realized that his people needed him and carrying out his duties was the only way to heaven. After that day he became a responsible King.
"""
"""
Explanation: Exercise 2: list methods
In this exercise, we will focus on the following list methods:
a.) append
b.) count
c.) index
d.) insert
e.) pop
For each of the aforementioned list methods:
explain the positional parameters
explain the keyword parameters
you can exclude self from your explanation
explain what the goal of the method is and what data type it returns, e.g., string, list, set, etc.
give a working example. Provide also an example by providing a value for keyword parameters (assuming the method has one or more keyword parameters).
Exercise 3: set methods
In this exercise, we will focus on the following set methods:
* update
* pop
* remove
* clear
For each of the aforementioned set methods:
explain the positional parameters
explain the keyword parameters
you can exclude self from your explanation
explain what the goal of the method is and what data type it returns, e.g., string, list, set, etc.
give a working example. Provide also an example by providing a value for keyword parameters (assuming the method has one or more keyword parameters).
Please fill in your answers here:
Exercise 4: Analyzing vocabulary using sets
Please consider the following two texts:
These stories were copied from here.
End of explanation
"""
# your code here
"""
Explanation: Exercise 4a: preprocessing text
Before analyzing the two texts, we are first going to preprocess them.
Please use a particular string method multiple times to replace the following characters by empty strings in both a_story and another_story:
* newlines: '\n'
* commas: ','
* dots: '.'
* quotes: '"'
Please assign the processed texts to the variables cleaned_story and cleaned_another_story.
End of explanation
"""
#you code here
"""
Explanation: Exercise 4b: from text to a list
For each text (cleaned_story and cleaned_another_story), please use a string method to convert cleaned_story and cleaned_another_story into lists by splitting using spaces. Please call the lists list_cleaned_story and list_cleaned_another_story.
End of explanation
"""
# Build the vocabulary (the set of unique words) of the first story by
# adding each word to an initially empty set. The original stub had a
# comment-only loop body, which is a syntax error.
vocab_a_story = set()
for word in list_cleaned_story:
    vocab_a_story.add(word)
"""
Explanation: Exercise 4c: from a list to a vocabulary (a set)
Please create a set for the words in each text by adding each word to a set. In the end, you should have two variables vocab_a_story and vocab_another_story, each containing the unique words in each story. Please use the output of Exercise 4b as the input for this exercise.
End of explanation
"""
# you code
"""
Explanation: do the same for the other text
End of explanation
"""
# your code
"""
Explanation: Exercise 4d: analyzing vocabularies
Please analyze the vocabularies by using set methods to determine:
* which words occur in both texts
* which words only occur in a_story
* which words only occur in another_story
End of explanation
"""
words = ['there',
'was',
'a',
'village',
'near',
'a',
'jungle',
'the',
'village',
'cows',
'used',
'to',
'go',
'up',
'to',
'the',
'jungle',
'in',
'search',
'of',
'food.',
'in',
'the',
'forest',
'there',
'lived',
'a',
'wicked',
'lion',
'he',
'used',
'to',
'kill',
'a',
'cow',
'now',
'and',
'then',
'and',
'eat',
'her',
'this',
'was',
'happening',
'for',
'quite',
'sometime',
'the',
'cows',
'were',
'frightened',
'one',
'day',
'all',
'the',
'cows',
'held',
'a',
'meeting',
'an',
'old',
'cow',
'said',
'listen',
'everybody',
'the',
'lion',
'eats',
'one',
'of',
'us',
'only',
'because',
'we',
'go',
'into',
'the',
'jungle',
'separately',
'from',
'now',
'on',
'we',
'will',
'all',
'be',
'together',
'from',
'then',
'on',
'all',
'the',
'cows',
'went',
'into',
'the',
'jungle',
'in',
'a',
'herd',
'when',
'they',
'heard',
'or',
'saw',
'the',
'lion',
'all',
'of',
'them',
'unitedly',
'moo',
'and',
'chased',
'him',
'away',
'moral',
'divided',
'we',
'fall',
'united',
'we',
'stand']
"""
Explanation: Exercise 5: counting
Below you find a list called words, which is a list of strings.
a.) Please create a dictionary in which the key is the word, and the value is the frequency of the word.
Exclude all words which meet at least one of the following requirements:
end with the letter e
start with the letter t
start with the letter c and end with the letter w (both conditions must be met)
have six or more letters
You are not allowed to use the collections module to do this.
End of explanation
"""
for word in ['up', 'near' , 'together', 'lion', 'cow']:
    # print frequency
    # TODO (exercise): look `word` up in your frequency dictionary from
    # Exercise 5a and print its frequency; if the word is not a key,
    # print '<WORD> does not occur' instead (e.g. via dict.get).
"""
Explanation: b.) Analyze your dicitionary by printing:
* how many keys it has
* what the highest word frequency is
* the sum of all values.
c.) In addition, print the frequencies of the following words using your dictionary (if the word does not occur in the dictionary, print 'WORD does not occur')
* up
* near
* together
* lion
* cow
End of explanation
"""
def print_beersong(start_number):
    """Print the '99 bottles of beer' song, counting down from start_number.

    Each verse is followed by a blank line; 'bottle' is singular when the
    count is one, and the song closes with the special final verse.

    :param start_number: positive int, the number of bottles to start with
    """
    for number in range(start_number, 0, -1):
        bottles = 'bottle' if number == 1 else 'bottles'
        remaining = number - 1
        if remaining == 0:
            last_line = 'No more bottles of beer on the wall.'
        else:
            last_line = '%d %s of beer on the wall.' % (
                remaining, 'bottle' if remaining == 1 else 'bottles')
        print('%d %s of beer on the wall,' % (number, bottles))
        print('%d %s of beer.' % (number, bottles))
        print('Take one down, pass it around,')
        print(last_line)
        print('')  # blank line between verses
    # Special final verse once the count reaches zero.
    print('No more bottles of beer on the wall,')
    print('no more bottles of beer.')
    print('Go to the store and buy some more,')
    print('%d bottles of beer on the wall.' % start_number)
Explanation: Exercise 6: Functions
Exercise 6a: the beersong
Please write a function that prints the beersong when it is called.
The function:
* is called print_beersong
* has one positional parameter start_number (this is 99 in the original song)
* prints the beer song
End of explanation
"""
def clean_text(text):
    """Return `text` with newlines, commas, dots, and double quotes
    replaced by spaces (as specified in Exercise 6c).

    :param text: str to clean
    :return: the cleaned str
    """
    for char in ('\n', ',', '.', '"'):
        text = text.replace(char, ' ')
    return text
"""
Explanation: Exercise 6b: the whatever can be in a bottle song
There are other liquids than beer than can be placed in a bottle, e.g., 99 bottles of water on the wall..
Please write a function that prints a variation of the beersong when it is called. All occurrences of beer will be replaced by what the user provides as an argument, e.g., water.
The function:
* is called print_liquids
* has one positional parameter: start_number (this is 99 in the original song)
* has one keyword parameter: liquid (set the default value to beer)
* prints a liquids song
Exercise 6c: preprocessing text
Please write the answer to Exercise 4a as a function. The function replaces the following characters by empty spaces in a text:
* newlines: '\n'
* commas: ','
* dots: '.'
* quotes: '"'
The function is:
* is called clean_text
* has one positional parameter text
* return a string, e.g., the cleaned text
End of explanation
"""
def clean_text_general(text, chars_to_remove=)
"""
Explanation: Exercise 6d: preprocessing text in a more general way
Please write a function that replaces all characters that the user provides by empty spaces.
The function is:
* is called clean_text_general
* has one positional parameter text
* has one keyword parameter chars_to_remove, which is a set (set the default to {'\n', ',', '.', '"'})
* return a string, e.g., the cleaned text
When the user provides a different value to chars_to_remove, e.g., {'a'}, then only those characters should be replaced by empty spaces in the text.
End of explanation
"""
def exclude_and_count
"""
Explanation: Please store this function in a file called assignment2_utils.py. Please import the function and call it in this notebook.
Exercise 6e: including and excluding words
Please write Exercise 5a as a function.
The function:
* is called exclude_and_count
* has one positional parameter words, which is a list of strings.
* creates a dictionary in which the key is a word and the value is the frequency of that word.
* words are excluded if they meet one of the following criteria:
* end with the letter e
* start with the letter t
* start with the letter c and end with the letter w (both conditions must be met)
* have six or more letters
* returns a dictionary in which the key is the word and the value is the frequency of the word.
End of explanation
"""
|
theoplatt/jupyter | multipletargets.ipynb | mit | targets = ['ENSG00000069696', 'ENSG00000144285']
targets_string = ', '.join('"{0}"'.format(t) for t in targets)
"""
Explanation: Our list of targets
End of explanation
"""
url = 'https://www.targetvalidation.org/api/latest/public/association/filter'
headers = {"Accept": "application/json"}
# There may be an easier way of building these parameters...
data = "{\"target\":[" + targets_string + "], \"facets\":true}"
response = requests.post(url, headers=headers, data=data)
output = response.json()
"""
Explanation: Make the API call with our list of targets to find the associations. Set facets to true.
End of explanation
"""
#print json.dumps(output, indent=2)
"""
Explanation: Print out all the json returned just for reference
End of explanation
"""
# Flatten the therapeutic-area facet buckets into plain dicts for display.
therapeuticareas = [
    {
        'target_count': bucket['unique_target_count']['value'],
        'disease_count': bucket['unique_disease_count']['value'],
        'therapeutic_area': bucket['label'],
        'key': bucket['key'],
    }
    for bucket in output['facets']['therapeutic_area']['buckets']
]
"""
Explanation: The therapeutic area facets look interesting - lets iterate through these and display
End of explanation
"""
# Rank by unique target count, breaking ties by disease count, largest first.
therapeuticareas.sort(
    key=lambda area: (area['target_count'], area['disease_count']),
    reverse=True)
"""
Explanation: Sort by target count and then disease count
End of explanation
"""
# Render the therapeutic-area rows as a grid-style ASCII table
# (requires the third-party `tabulate` package; Python 2 print syntax).
print tabulate(therapeuticareas, headers="keys", tablefmt="grid")
"""
Explanation: Using the python tabulate library to render a pretty table of our extracted therapeutic areas.
Note: You may need to run pip install tabulate in your python environment
End of explanation
"""
therapeuticareas = therapeuticareas[:5]
print tabulate(therapeuticareas, headers="keys", tablefmt="grid")
"""
Explanation: Lets just consider the first 5 top therapeutic areas
End of explanation
"""
for therapeuticarea in therapeuticareas:
print "Therapeutic area: " + therapeuticarea['therapeutic_area']
data = "{\"target\":[" + targets_string + "], \"facets\":true, \"therapeutic_area\":[\"" + therapeuticarea['key'] + "\"]}"
response = requests.post(url, headers=headers, data=data)
output = response.json()
diseases = []
for bucket in output['facets']['disease']['buckets']:
diseases.append({
'target_count' : bucket['unique_target_count']['value'],
'doc_count' : bucket['doc_count'],
'key' : bucket['key']
})
# Sort and take top 5
diseases = sorted(diseases, key=lambda k: (k['target_count'],k['doc_count']), reverse=True)
diseases = diseases[:5]
print tabulate(diseases, headers="keys", tablefmt="grid")
print ""
"""
Explanation: Now for each of those identify the top 5 diseases. Unfortunately we don't get the disease names in the facets, just the codes. If this is the right approach, then an API change to return disease names alongside the codes may be needed.
End of explanation
"""
|
tensorflow/docs-l10n | site/en-snapshot/tutorials/distribute/parameter_server_training.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 The TensorFlow Authors.
End of explanation
"""
!pip install portpicker
#@title
import multiprocessing
import os
import random
import portpicker
import tensorflow as tf
"""
Explanation: Parameter server training with ParameterServerStrategy
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/distribute/parameter_server_training"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/distribute/parameter_server_training.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/distribute/parameter_server_training.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/distribute/parameter_server_training.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Overview
Parameter server training is a common data-parallel method to scale up model training on multiple machines.
A parameter server training cluster consists of workers and parameter servers. Variables are created on parameter servers and they are read and updated by workers in each step. By default, workers read and update these variables independently without synchronizing with each other. This is why sometimes parameter server-style training is called asynchronous training.
In TensorFlow 2, parameter server training is powered by the tf.distribute.ParameterServerStrategy class, which distributes the training steps to a cluster that scales up to thousands of workers (accompanied by parameter servers).
Supported training methods
There are two main supported training methods:
The Keras Model.fit API: if you prefer a high-level abstraction and handling of training. This is generally recommended if you are training a tf.keras.Model.
A custom training loop: if you prefer to define the details of your training loop (you can refer to guides on Custom training, Writing a training loop from scratch
and Custom training loop with Keras and MultiWorkerMirroredStrategy for more details).
A cluster with jobs and tasks
Regardless of the API of choice (Model.fit or a custom training loop), distributed training in TensorFlow 2 involves a 'cluster' with several 'jobs', and each of the jobs may have one or more 'tasks'.
When using parameter server training, it is recommended to have:
One coordinator job (which has the job name chief)
Multiple worker jobs (job name worker)
Multiple parameter server jobs (job name ps)
The coordinator creates resources, dispatches training tasks, writes checkpoints, and deals with task failures. The workers and parameter servers run tf.distribute.Server instances that listen for requests from the coordinator.
Parameter server training with the Model.fit API
Parameter server training with the Model.fit API requires the coordinator to use a tf.distribute.ParameterServerStrategy object. Similar to Model.fit usage with no strategy, or with other strategies, the workflow involves creating and compiling the model, preparing the callbacks, and calling Model.fit.
Parameter server training with a custom training loop
With custom training loops, the tf.distribute.coordinator.ClusterCoordinator class is the key component used for the coordinator.
The ClusterCoordinator class needs to work in conjunction with a tf.distribute.Strategy object.
This tf.distribute.Strategy object is needed to provide the information of the cluster and is used to define a training step, as demonstrated in Custom training with tf.distribute.Strategy.
The ClusterCoordinator object then dispatches the execution of these training steps to remote workers.
For parameter server training, the ClusterCoordinator needs to work with a tf.distribute.ParameterServerStrategy.
The most important API provided by the ClusterCoordinator object is schedule:
The schedule API enqueues a tf.function and returns a future-like RemoteValue immediately.
The queued functions will be dispatched to remote workers in background threads and their RemoteValues will be filled asynchronously.
Since schedule doesn’t require worker assignment, the tf.function passed in can be executed on any available worker.
If the worker it is executed on becomes unavailable before its completion, the function will be retried on another available worker.
Because of this fact and the fact that function execution is not atomic, a single function call may be executed more than once.
In addition to dispatching remote functions, the ClusterCoordinator also helps
to create datasets on all the workers and rebuild these datasets when a worker recovers from failure.
Tutorial setup
The tutorial will branch into Model.fit and custom training loop paths, and you can choose the one that fits your needs. Sections other than "Training with X" are applicable to both paths.
End of explanation
"""
def create_in_process_cluster(num_workers, num_ps):
    """Start in-process TF servers for a local test cluster.

    Picks free localhost ports, launches `num_workers` worker servers and
    `num_ps` parameter-server servers, and returns a SimpleClusterResolver
    describing the cluster (rpc_layer="grpc").
    """
    worker_addresses = [
        "localhost:%s" % portpicker.pick_unused_port()
        for _ in range(num_workers)
    ]
    ps_addresses = [
        "localhost:%s" % portpicker.pick_unused_port()
        for _ in range(num_ps)
    ]

    cluster_def = {"worker": worker_addresses}
    if num_ps > 0:
        cluster_def["ps"] = ps_addresses
    cluster_spec = tf.train.ClusterSpec(cluster_def)

    # Workers need some inter-op threads to work properly.
    worker_config = tf.compat.v1.ConfigProto()
    if multiprocessing.cpu_count() < num_workers + 1:
        worker_config.inter_op_parallelism_threads = num_workers + 1

    for task_index in range(num_workers):
        tf.distribute.Server(
            cluster_spec,
            job_name="worker",
            task_index=task_index,
            config=worker_config,
            protocol="grpc")

    for task_index in range(num_ps):
        tf.distribute.Server(
            cluster_spec,
            job_name="ps",
            task_index=task_index,
            protocol="grpc")

    return tf.distribute.cluster_resolver.SimpleClusterResolver(
        cluster_spec, rpc_layer="grpc")
# Set the environment variable to allow reporting worker and ps failure to the
# coordinator. This is a workaround and won't be necessary in the future.
os.environ["GRPC_FAIL_FAST"] = "use_caller"

# Cluster shape for the in-process demo: 3 workers and 2 parameter servers.
NUM_WORKERS = 3
NUM_PS = 2
cluster_resolver = create_in_process_cluster(NUM_WORKERS, NUM_PS)
"""
Explanation: Cluster setup
As mentioned above, a parameter server training cluster requires a coordinator task that runs your training program, one or several workers and parameter server tasks that run TensorFlow servers—tf.distribute.Server—and possibly an additional evaluation task that runs sidecar evaluation (refer to the sidecar evaluation section below). The requirements to set them up are:
The coordinator task needs to know the addresses and ports of all other TensorFlow servers, except the evaluator.
The workers and parameter servers need to know which port they need to listen to. For the sake of simplicity, you can usually pass in the complete cluster information when creating TensorFlow servers on these tasks.
The evaluator task doesn’t have to know the setup of the training cluster. If it does, it should not attempt to connect to the training cluster.
Workers and parameter servers should have task types as "worker" and "ps", respectively. The coordinator should use "chief" as the task type for legacy reasons.
In this tutorial, you will create an in-process cluster so that the whole parameter server training can be run in Colab. You will learn how to set up real clusters in a later section.
In-process cluster
You will start by creating several TensorFlow servers in advance and you will connect to them later. Note that this is only for the purpose of this tutorial's demonstration, and in real training the servers will be started on "worker" and "ps" machines.
End of explanation
"""
# Shard variables across the parameter servers: only variables of at least
# 256 KiB are split, into at most NUM_PS shards.
variable_partitioner = (
    tf.distribute.experimental.partitioners.MinSizePartitioner(
        min_shard_bytes=256 * 1024,
        max_shards=NUM_PS))

# The strategy used by both the Model.fit path and the custom-loop path below.
strategy = tf.distribute.ParameterServerStrategy(
    cluster_resolver,
    variable_partitioner=variable_partitioner)
"""
Explanation: The in-process cluster setup is frequently used in unit testing, such as here.
Another option for local testing is to launch processes on the local machine—check out Multi-worker training with Keras for an example of this approach.
Instantiate a ParameterServerStrategy
Before you dive into the training code, let's instantiate a tf.distribute.ParameterServerStrategy object. Note that this is needed regardless of whether you are proceeding with Model.fit or a custom training loop. The variable_partitioner argument will be explained in the Variable sharding section.
End of explanation
"""
global_batch_size = 64

# Tiny synthetic dataset: 10 examples with 10 random features and scalar labels.
x = tf.random.uniform((10, 10))
y = tf.random.uniform((10,))

# Shuffle and repeat, then batch and prefetch, as recommended in the text above.
dataset = (
    tf.data.Dataset.from_tensor_slices((x, y))
    .shuffle(10)
    .repeat()
    .batch(global_batch_size)
    .prefetch(2)
)
"""
Explanation: In order to use GPUs for training, allocate GPUs visible to each worker. ParameterServerStrategy will use all the available GPUs on each worker, with the restriction that all workers should have the same number of GPUs available.
Variable sharding
Variable sharding refers to splitting a variable into multiple smaller
variables, which are called shards. Variable sharding may be useful to distribute the network load when accessing these shards. It is also useful to distribute computation and storage of a normal variable across multiple parameter servers, for example, when using very large embeddings
that may not fit in a single machine's memory.
To enable variable sharding, you can pass in a variable_partitioner when
constructing a ParameterServerStrategy object. The variable_partitioner will
be invoked every time when a variable is created and it is expected to return
the number of shards along each dimension of the variable. Some out-of-box
variable_partitioners are provided such as
tf.distribute.experimental.partitioners.MinSizePartitioner. It is recommended to use size-based partitioners like
tf.distribute.experimental.partitioners.MinSizePartitioner to avoid
partitioning small variables, which could have a negative impact on model training
speed.
When a variable_partitioner is passed in, and you create a variable directly
under Strategy.scope, the variable will become a container type with a variables
property, which provides access to the list of shards. In most cases, this
container will be automatically converted to a Tensor by concatenating all the
shards. As a result, it can be used as a normal variable. On the other hand,
some TensorFlow methods such as tf.nn.embedding_lookup provide efficient
implementation for this container type and in these methods automatic
concatenation will be avoided.
Refer to the API docs of tf.distribute.ParameterServerStrategy for more details.
Training with Model.fit
<a id="training_with_modelfit"></a>
Keras provides an easy-to-use training API via Model.fit that handles the training loop under the hood, with the flexibility of an overridable train_step, and callbacks which provide functionalities such as checkpoint saving or summary saving for TensorBoard. With Model.fit, the same training code can be used with other strategies with a simple swap of the strategy object.
Input data
Keras Model.fit with tf.distribute.ParameterServerStrategy can take input data in the form of a tf.data.Dataset, tf.distribute.DistributedDataset, or a tf.keras.utils.experimental.DatasetCreator, with Dataset being the recommended option for ease of use. If you encounter memory issues using Dataset, however, you may need to use DatasetCreator with a callable dataset_fn argument (refer to the tf.keras.utils.experimental.DatasetCreator API documentation for details).
If you transform your dataset into a tf.data.Dataset, you should use Dataset.shuffle and Dataset.repeat, as demonstrated in the code example below.
Keras Model.fit with parameter server training assumes that each worker receives the same dataset, except when it is shuffled differently. Therefore, by calling Dataset.shuffle, you ensure more even iterations over the data.
Because workers do not synchronize, they may finish processing their datasets at different times. Therefore, the easiest way to define epochs with parameter server training is to use Dataset.repeat—which repeats a dataset indefinitely when called without an argument—and specify the steps_per_epoch argument in the Model.fit call.
Refer to the "Training workflows" section of the tf.data guide for more details on shuffle and repeat.
End of explanation
"""
with strategy.scope():
    # Trivial demo model; variables created in this scope are distributed by
    # the ParameterServerStrategy.
    model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])

    # `steps_per_execution` lets Keras run several training steps per call.
    model.compile(tf.keras.optimizers.SGD(), loss="mse", steps_per_execution=10)
"""
Explanation: If you instead create your dataset with tf.keras.utils.experimental.DatasetCreator, the code in dataset_fn will be invoked on the input device, which is usually the CPU, on each of the worker machines.
Model construction and compiling
Now, you will create a tf.keras.Model—a trivial tf.keras.models.Sequential model for demonstration purposes—followed by a Model.compile call to incorporate components, such as an optimizer, metrics, and other parameters such as steps_per_execution:
End of explanation
"""
# Working directories for logs, checkpoints, and fault-tolerance backups.
working_dir = "/tmp/my_working_dir"
log_dir = os.path.join(working_dir, "log")
ckpt_filepath = os.path.join(working_dir, "ckpt")
backup_dir = os.path.join(working_dir, "backup")

callbacks = [
    # Periodic summary files for visualization in TensorBoard.
    tf.keras.callbacks.TensorBoard(log_dir=log_dir),
    # Save the model periodically during training.
    tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_filepath),
    # Back up model/epoch state so training can resume after a failure.
    tf.keras.callbacks.BackupAndRestore(backup_dir=backup_dir),
]

# steps_per_epoch is required with ParameterServerStrategy (dataset repeats forever).
model.fit(dataset, epochs=5, steps_per_epoch=20, callbacks=callbacks)
"""
Explanation: Callbacks and training
<a id="callbacks-and-training"> </a>
Before you call Keras Model.fit for the actual training, prepare any needed callbacks for common tasks, such as:
tf.keras.callbacks.ModelCheckpoint: saves the model at a certain frequency, such as after every epoch.
tf.keras.callbacks.BackupAndRestore: provides fault tolerance by backing up the model and current epoch number, if the cluster experiences unavailability (such as abort or preemption). You can then restore the training state upon a restart from a job failure, and continue training from the beginning of the interrupted epoch.
tf.keras.callbacks.TensorBoard: periodically writes model logs in summary files that can be visualized in the TensorBoard tool.
Note: Due to performance considerations, custom callbacks cannot have batch level callbacks overridden when used with ParameterServerStrategy. Please modify your custom callbacks to make them epoch level calls, and adjust steps_per_epoch to a suitable value. In addition, steps_per_epoch is a required argument for Model.fit when used with ParameterServerStrategy.
End of explanation
"""
# Toy vocabularies: feature strings and binary string labels.
feature_vocab = [
    "avenger", "ironman", "batman", "hulk", "spiderman", "kingkong", "wonder_woman"
]
label_vocab = ["yes", "no"]

# Preprocessing layers are created under `Strategy.scope` so their state is
# created on the cluster rather than lifted to the coordinator (see the
# "Set up the data" explanation above).
with strategy.scope():
    # Maps feature strings to integer ids; unlike the label lookup below,
    # OOV handling is left at StringLookup's default.
    feature_lookup_layer = tf.keras.layers.StringLookup(
        vocabulary=feature_vocab,
        mask_token=None)

    # Maps "yes"/"no" labels to ids; no OOV indices are reserved for labels.
    label_lookup_layer = tf.keras.layers.StringLookup(
        vocabulary=label_vocab,
        num_oov_indices=0,
        mask_token=None)

    # Small Keras model: 3 raw feature strings per example -> feature ids.
    raw_feature_input = tf.keras.layers.Input(
        shape=(3,),
        dtype=tf.string,
        name="feature")
    feature_id_input = feature_lookup_layer(raw_feature_input)
    feature_preprocess_stage = tf.keras.Model(
        {"features": raw_feature_input},
        feature_id_input)

    # Small Keras model: 1 raw label string per example -> label id.
    raw_label_input = tf.keras.layers.Input(
        shape=(1,),
        dtype=tf.string,
        name="label")
    label_id_input = label_lookup_layer(raw_label_input)
    label_preprocess_stage = tf.keras.Model(
        {"label": raw_label_input},
        label_id_input)
"""
Explanation: Direct usage with ClusterCoordinator (optional)
Even if you choose the Model.fit training path, you can optionally instantiate a tf.distribute.coordinator.ClusterCoordinator object to schedule other functions you would like to be executed on the workers. Refer to the Training with a custom training loop section for more details and examples.
Training with a custom training loop
<a id="training_with_custom_training_loop"> </a>
Using custom training loops with tf.distribute.Strategy provides great flexibility to define training loops. With the ParameterServerStrategy defined above (as strategy), you will use a tf.distribute.coordinator.ClusterCoordinator to dispatch the execution of training steps to remote workers.
Then, you will create a model, define a dataset, and define a step function, as you have done in the training loop with other tf.distribute.Strategys. You can find more details in the Custom training with tf.distribute.Strategy tutorial.
To ensure efficient dataset prefetching, use the recommended distributed dataset creation APIs mentioned in the Dispatch training steps to remote workers section below. Also, make sure to call Strategy.run inside worker_fn to take full advantage of GPUs allocated to workers. The rest of the steps are the same for training with or without GPUs.
Let’s create these components in the following steps:
Set up the data
First, write a function that creates a dataset.
If you would like to preprocess the data with Keras preprocessing layers or Tensorflow Transform layers, create these layers outside the dataset_fn and under Strategy.scope, like you would do for any other Keras layers. This is because the dataset_fn will be wrapped into a tf.function and then executed on each worker to generate the data pipeline.
If you don't follow the above procedure, creating the layers might create Tensorflow states which will be lifted out of the tf.function to the coordinator. Thus, accessing them on workers would incur repetitive RPC calls between coordinator and workers, and cause significant slowdown.
Placing the layers under Strategy.scope will instead create them on all workers. Then, you will apply the transformation inside the dataset_fn via tf.data.Dataset.map. Refer to Data preprocessing in the Distributed input tutorial for more information on data preprocessing with distributed input.
End of explanation
"""
def feature_and_label_gen(num_examples=200):
    """Generate a toy dataset dict with "features" and "label" lists.

    Each example is 3 words sampled from `feature_vocab`; its label is
    ["yes"] when "avenger" is among them, otherwise ["no"].
    """
    feature_rows = []
    label_rows = []
    for _ in range(num_examples):
        picked = random.sample(feature_vocab, 3)
        feature_rows.append(picked)
        label_rows.append(["yes"] if "avenger" in picked else ["no"])
    return {"features": feature_rows, "label": label_rows}

# Materialize one toy dataset for the training pipeline below.
examples = feature_and_label_gen()
"""
Explanation: Generate toy examples in a dataset:
End of explanation
"""
def dataset_fn(_):
    """Build the training pipeline (the input-context argument is unused).

    Maps raw examples through the preprocessing stages, then shuffles,
    batches, and repeats indefinitely.
    """
    raw_dataset = tf.data.Dataset.from_tensor_slices(examples)

    def _preprocess(example):
        return (
            {"features": feature_preprocess_stage(example["features"])},
            label_preprocess_stage(example["label"]),
        )

    return raw_dataset.map(_preprocess).shuffle(200).batch(32).repeat()
"""
Explanation: Then, create the training dataset wrapped in a dataset_fn:
End of explanation
"""
# These variables created under the `Strategy.scope` will be placed on parameter
# servers in a round-robin fashion.
with strategy.scope():
    # Create the model. The input needs to be compatible with Keras processing layers.
    model_input = tf.keras.layers.Input(
        shape=(3,), dtype=tf.int64, name="model_input")

    # Wide embedding table (output_dim=16384), large enough to be sharded by
    # the MinSizePartitioner configured on the strategy (see assertions below).
    emb_layer = tf.keras.layers.Embedding(
        input_dim=len(feature_lookup_layer.get_vocabulary()), output_dim=16384)
    # Average the three per-feature embeddings into one vector per example.
    emb_output = tf.reduce_mean(emb_layer(model_input), axis=1)
    # Single sigmoid unit for binary ("yes"/"no") classification.
    dense_output = tf.keras.layers.Dense(units=1, activation="sigmoid")(emb_output)
    model = tf.keras.Model({"features": model_input}, dense_output)

    # Optimizer and metric are also created under the strategy scope, as the
    # explanation above requires for all variables.
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1)
    accuracy = tf.keras.metrics.Accuracy()
"""
Explanation: Build the model
Next, create the model and other objects. Make sure to create all variables under Strategy.scope.
End of explanation
"""
# The embedding table was split into two shards along its first dimension
# (presumably an (8, 16384) table — 7 vocab words + OOV — TODO confirm).
assert len(emb_layer.weights) == 2
assert emb_layer.weights[0].shape == (4, 16384)
assert emb_layer.weights[1].shape == (4, 16384)

# Each shard should report a different parameter-server device.
print(emb_layer.weights[0].device)
print(emb_layer.weights[1].device)
"""
Explanation: Let's confirm that the variable partitioner (a MinSizePartitioner with max_shards equal to the number of parameter servers) split all variables into two shards and that each shard was assigned to a different parameter server:
End of explanation
"""
@tf.function
def step_fn(iterator):
    """One distributed training step; returns the cross-replica summed loss."""

    def replica_fn(batch_data, labels):
        # Forward pass with unreduced per-example BCE so the loss can be
        # averaged over the global batch via `compute_average_loss`.
        with tf.GradientTape() as tape:
            pred = model(batch_data, training=True)
            per_example_loss = tf.keras.losses.BinaryCrossentropy(
                reduction=tf.keras.losses.Reduction.NONE)(labels, pred)
            loss = tf.nn.compute_average_loss(per_example_loss)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        # Threshold sigmoid output at 0.5 to update the accuracy metric.
        actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64)
        accuracy.update_state(labels, actual_pred)
        return loss

    batch_data, labels = next(iterator)
    losses = strategy.run(replica_fn, args=(batch_data, labels))
    # Sum per-replica losses (supports multiple GPUs per worker).
    return strategy.reduce(tf.distribute.ReduceOp.SUM, losses, axis=None)
"""
Explanation: Define the training step
Third, create the training step wrapped into a tf.function:
End of explanation
"""
# The coordinator dispatches scheduled `tf.function`s to remote workers.
coordinator = tf.distribute.coordinator.ClusterCoordinator(strategy)
"""
Explanation: In the above training step function, calling Strategy.run and Strategy.reduce in the step_fn can support multiple GPUs per worker. If the workers have GPUs allocated, Strategy.run will distribute the datasets on multiple replicas.
Dispatch training steps to remote workers
<a id="dispatch_training_steps_to_remote_workers"> </a>
After all the computations are defined by ParameterServerStrategy, you will use the tf.distribute.coordinator.ClusterCoordinator class to create resources and distribute the training steps to remote workers.
Let’s first create a ClusterCoordinator object and pass in the strategy object:
End of explanation
"""
@tf.function
def per_worker_dataset_fn():
    # Wrapping `dataset_fn` in `distribute_datasets_from_function` allows
    # efficient prefetching to GPUs.
    return strategy.distribute_datasets_from_function(dataset_fn)

# Replicate the dataset to every worker and build a per-worker iterator;
# worker slices are substituted into scheduled step functions.
per_worker_dataset = coordinator.create_per_worker_dataset(per_worker_dataset_fn)
per_worker_iterator = iter(per_worker_dataset)
"""
Explanation: Then, create a per-worker dataset and an iterator using the ClusterCoordinator.create_per_worker_dataset API, which replicates the dataset to all workers. In the per_worker_dataset_fn below, wrapping the dataset_fn into strategy.distribute_datasets_from_function is recommended to allow efficient prefetching to GPUs seamlessly.
End of explanation
"""
num_epochs = 4
steps_per_epoch = 5
for i in range(num_epochs):
    # Start each epoch with a fresh accuracy accumulator.
    accuracy.reset_states()
    # `schedule` returns immediately; steps run asynchronously on workers.
    for _ in range(steps_per_epoch):
        coordinator.schedule(step_fn, args=(per_worker_iterator,))
    # Wait at epoch boundaries.
    coordinator.join()
    print("Finished epoch %d, accuracy is %f." % (i, accuracy.result().numpy()))
"""
Explanation: The final step is to distribute the computation to remote workers using ClusterCoordinator.schedule:
The schedule method enqueues a tf.function and returns a future-like RemoteValue immediately. The queued functions will be dispatched to remote workers in background threads and the RemoteValue will be filled asynchronously.
The join method (ClusterCoordinator.join) can be used to wait until all scheduled functions are executed.
End of explanation
"""
# `schedule` returns a future-like RemoteValue; `fetch` blocks until the
# value has been filled in by a worker.
loss = coordinator.schedule(step_fn, args=(per_worker_iterator,))
print("Final loss is %f" % loss.fetch())
"""
Explanation: Here is how you can fetch the result of a RemoteValue:
End of explanation
"""
def _preprocess_eval(example):
    # Same preprocessing as training: raw strings -> feature/label ids.
    return (
        {"features": feature_preprocess_stage(example["features"])},
        label_preprocess_stage(example["label"]),
    )

# Small evaluation set (16 examples) run directly on the coordinator.
eval_dataset = (
    tf.data.Dataset.from_tensor_slices(feature_and_label_gen(num_examples=16))
    .map(_preprocess_eval)
    .batch(8)
)

eval_accuracy = tf.keras.metrics.Accuracy()

for batch_data, labels in eval_dataset:
    pred = model(batch_data, training=False)
    actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64)
    eval_accuracy.update_state(labels, actual_pred)

print("Evaluation accuracy: %f" % eval_accuracy.result())
"""
Explanation: Alternatively, you can launch all steps and do something while waiting for
completion:
python
for _ in range(total_steps):
coordinator.schedule(step_fn, args=(per_worker_iterator,))
while not coordinator.done():
time.sleep(10)
# Do something like logging metrics or writing checkpoints.
For the complete training and serving workflow for this particular example, please check out this test.
More about dataset creation
The dataset in the above code is created using the ClusterCoordinator.create_per_worker_dataset API. It creates one dataset per worker and returns a container object. You can call the iter method on it to create a per-worker iterator. The per-worker iterator contains one iterator per worker and the corresponding slice of a worker will be substituted in the input argument of the function passed to the ClusterCoordinator.schedule method before the function is executed on a particular worker.
The ClusterCoordinator.schedule method assumes workers are equivalent and thus assumes the datasets on different workers are the same (except that they may be shuffled differently). Because of this, it is also recommended to repeat datasets, and schedule a finite number of steps instead of relying on receiving an OutOfRangeError from a dataset.
Another important note is that tf.data datasets don’t support implicit serialization and deserialization across task boundaries. So it is important to create the whole dataset inside the function passed to ClusterCoordinator.create_per_worker_dataset. The create_per_worker_dataset API can also directly take a tf.data.Dataset or tf.distribute.DistributedDataset as input.
Evaluation
The two main approaches to performing evaluation with tf.distribute.ParameterServerStrategy training are inline evaluation and sidecar evaluation. Each has its own pros and cons as described below. The inline evaluation method is recommended if you don't have a preference.
Inline evaluation
In this method, the coordinator alternates between training and evaluation, and thus it is called inline evaluation.
There are several benefits of inline evaluation. For example:
It can support large evaluation models and evaluation datasets that a single task cannot hold.
The evaluation results can be used to make decisions for training the next epoch, for example, whether to stop training early.
There are two ways to implement inline evaluation: direct evaluation and distributed evaluation.
Direct evaluation: For small models and evaluation datasets, the coordinator can run evaluation directly on the distributed model with the evaluation dataset on the coordinator:
End of explanation
"""
with strategy.scope():
    # Define the eval metric on parameter servers.
    eval_accuracy = tf.keras.metrics.Accuracy()

@tf.function
def eval_step(iterator):
    """One distributed evaluation step, dispatched to a worker by the coordinator."""
    def replica_fn(batch_data, labels):
        pred = model(batch_data, training=False)
        # Threshold sigmoid output at 0.5 and accumulate accuracy.
        actual_pred = tf.cast(tf.greater(pred, 0.5), tf.int64)
        eval_accuracy.update_state(labels, actual_pred)
    batch_data, labels = next(iterator)
    strategy.run(replica_fn, args=(batch_data, labels))

def eval_dataset_fn():
    """Build the eval pipeline (16 toy examples, same preprocessing as training)."""
    return tf.data.Dataset.from_tensor_slices(
        feature_and_label_gen(num_examples=16)).map(
            lambda x: (
                {"features": feature_preprocess_stage(x["features"])},
                label_preprocess_stage(x["label"])
            )).shuffle(16).repeat().batch(8)

# One eval dataset/iterator per worker, mirroring the training setup.
per_worker_eval_dataset = coordinator.create_per_worker_dataset(eval_dataset_fn)
per_worker_eval_iterator = iter(per_worker_eval_dataset)

eval_steps_per_epoch = 2
for _ in range(eval_steps_per_epoch):
    coordinator.schedule(eval_step, args=(per_worker_eval_iterator,))
# Block until all scheduled eval steps finish before reading the metric.
coordinator.join()
print("Evaluation accuracy: %f" % eval_accuracy.result())
"""
Explanation: Distributed evaluation: For large models or datasets that are infeasible to run directly on the coordinator, the coordinator task can distribute evaluation tasks to the workers via the ClusterCoordinator.schedule/ClusterCoordinator.join methods:
End of explanation
"""
|
tombstone/models | official/colab/nlp/customize_encoder.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 The TensorFlow Authors.
End of explanation
"""
!pip install -q tf-nightly
!pip install -q tf-models-nightly
"""
Explanation: Customizing a Transformer Encoder
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/official_models/nlp/customize_encoder"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/models/blob/master/official/colab/nlp/customize_encoder.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/models/blob/master/official/colab/nlp/customize_encoder.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/models/official/colab/nlp/customize_encoder.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Learning objectives
The TensorFlow Models NLP library is a collection of tools for building and training modern high performance natural language models.
The TransformEncoder is the core of this library, and lots of new network architectures are proposed to improve the encoder. In this Colab notebook, we will learn how to customize the encoder to employ new network architectures.
Install and import
Install the TensorFlow Model Garden pip package
tf-models-nightly is the nightly Model Garden package created daily automatically.
pip will install all models and dependencies automatically.
End of explanation
"""
import numpy as np
import tensorflow as tf
from official.modeling import activations
from official.nlp import modeling
from official.nlp.modeling import layers, losses, models, networks
"""
Explanation: Import Tensorflow and other libraries
End of explanation
"""
# Shared hyper-parameters for a small BERT-style encoder (toy sizes for demo).
cfg = {
    "vocab_size": 100,
    "hidden_size": 32,
    "num_layers": 3,
    "num_attention_heads": 4,
    "intermediate_size": 64,
    "activation": activations.gelu,
    "dropout_rate": 0.1,
    "attention_dropout_rate": 0.1,
    "sequence_length": 16,
    "type_vocab_size": 2,
    "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
}
# The canonical Transformer-based BERT encoder built from the config above.
bert_encoder = modeling.networks.TransformerEncoder(**cfg)
def build_classifier(bert_encoder):
    """Wrap an encoder network in a binary (num_classes=2) BertClassifier head."""
    return modeling.models.BertClassifier(bert_encoder, num_classes=2)
canonical_classifier_model = build_classifier(bert_encoder)
"""
Explanation: Canonical BERT encoder
Before learning how to customize the encoder, let's firstly create a canonical BERT enoder and use it to instantiate a BertClassifier for classification task.
End of explanation
"""
def predict(model):
    """Run `model` on a fixed random batch and print the resulting logits.

    Reads vocabulary size, sequence length and type-vocab size from the
    module-level `cfg`.  The fixed numpy seed makes the fake batch
    reproducible, so different encoders can be compared on identical inputs.
    """
    batch_size = 3
    np.random.seed(0)
    word_ids = np.random.randint(
        cfg["vocab_size"], size=(batch_size, cfg["sequence_length"]))
    mask = np.random.randint(2, size=(batch_size, cfg["sequence_length"]))
    type_ids = np.random.randint(
        cfg["type_vocab_size"], size=(batch_size, cfg["sequence_length"]))
    print(model([word_ids, mask, type_ids], training=False))
# Sanity-check the canonical classifier on the fixed batch.
predict(canonical_classifier_model)
"""
Explanation: canonical_classifier_model can be trained using the training data. For details about how to train the model, please see the colab fine_tuning_bert.ipynb. We skip the code that trains the model here.
After training, we can apply the model to do prediction.
End of explanation
"""
# Per-block (Transformer layer) settings, mirroring the values in `cfg`.
default_hidden_cfg = dict(
    num_attention_heads=cfg["num_attention_heads"],
    intermediate_size=cfg["intermediate_size"],
    intermediate_activation=activations.gelu,
    dropout_rate=cfg["dropout_rate"],
    attention_dropout_rate=cfg["attention_dropout_rate"],
    kernel_initializer=tf.keras.initializers.TruncatedNormal(0.02),
)
# Embedding sub-network settings, mirroring the values in `cfg`.
default_embedding_cfg = dict(
    vocab_size=cfg["vocab_size"],
    type_vocab_size=cfg["type_vocab_size"],
    hidden_size=cfg["hidden_size"],
    seq_length=cfg["sequence_length"],
    initializer=tf.keras.initializers.TruncatedNormal(0.02),
    dropout_rate=cfg["dropout_rate"],
    max_seq_length=cfg["sequence_length"],
)
# Full EncoderScaffold argument set that reproduces the canonical encoder.
default_kwargs = dict(
    hidden_cfg=default_hidden_cfg,
    embedding_cfg=default_embedding_cfg,
    num_hidden_instances=cfg["num_layers"],
    pooled_output_dim=cfg["hidden_size"],
    return_all_layer_outputs=True,
    pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(0.02),
)
# An uncustomized scaffold is weight-compatible with TransformerEncoder, so
# copying the canonical model's weights should yield identical predictions.
encoder_scaffold = modeling.networks.EncoderScaffold(**default_kwargs)
classifier_model_from_encoder_scaffold = build_classifier(encoder_scaffold)
classifier_model_from_encoder_scaffold.set_weights(
    canonical_classifier_model.get_weights())
predict(classifier_model_from_encoder_scaffold)
"""
Explanation: Customize BERT encoder
One BERT encoder consists of an embedding network and multiple transformer blocks, and each transformer block contains an attention layer and a feedforward layer.
We provide easy ways to customize each of those components via (1)
EncoderScaffold and (2) TransformerScaffold.
Use EncoderScaffold
EncoderScaffold allows users to provide a custom embedding subnetwork
(which will replace the standard embedding logic) and/or a custom hidden layer class (which will replace the Transformer instantiation in the encoder).
Without Customization
Without any customization, EncoderScaffold behaves the same as the canonical TransformerEncoder.
As shown in the following example, EncoderScaffold can load TransformerEncoder's weights and output the same values:
End of explanation
"""
# A custom embedding network with only two inputs (word ids and padding mask),
# and no positional or token-type embeddings.
word_ids = tf.keras.layers.Input(
    shape=(cfg['sequence_length'],), dtype=tf.int32, name="input_word_ids")
mask = tf.keras.layers.Input(
    shape=(cfg['sequence_length'],), dtype=tf.int32, name="input_mask")
embedding_layer = modeling.layers.OnDeviceEmbedding(
    vocab_size=cfg['vocab_size'],
    embedding_width=cfg['hidden_size'],
    initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
    name="word_embeddings")
word_embeddings = embedding_layer(word_ids)
# Turn the padding mask into a self-attention mask for the Transformer blocks.
attention_mask = layers.SelfAttentionMask()([word_embeddings, mask])
new_embedding_network = tf.keras.Model([word_ids, mask],
                                       [word_embeddings, attention_mask])
"""
Explanation: Customize Embedding
Next, we show how to use a customized embedding network.
We firstly build an embedding network that will replace the default network. This one will have 2 inputs (mask and word_ids) instead of 3, and won't use positional embeddings.
End of explanation
"""
# Visualize the custom embedding network: two inputs, two outputs.
tf.keras.utils.plot_model(new_embedding_network, show_shapes=True, dpi=48)
"""
Explanation: Inspecting new_embedding_network, we can see it takes two inputs:
input_word_ids and input_mask.
End of explanation
"""
kwargs = dict(default_kwargs)
# Use new embedding network.
kwargs['embedding_cls'] = new_embedding_network
# Hand over the embedding table so downstream components can reuse it.
kwargs['embedding_data'] = embedding_layer.embeddings
encoder_with_customized_embedding = modeling.networks.EncoderScaffold(**kwargs)
classifier_model = build_classifier(encoder_with_customized_embedding)
# ... Train the model ...
print(classifier_model.inputs)
# Assert that there are only two inputs.
assert len(classifier_model.inputs) == 2
"""
Explanation: We then can build a new encoder using the above new_embedding_network.
End of explanation
"""
# Swap the default Transformer block for ReZeroTransformer via hidden_cls.
kwargs = dict(default_kwargs)
# Use ReZeroTransformer.
kwargs['hidden_cls'] = modeling.layers.ReZeroTransformer
encoder_with_rezero_transformer = modeling.networks.EncoderScaffold(**kwargs)
classifier_model = build_classifier(encoder_with_rezero_transformer)
# ... Train the model ...
predict(classifier_model)
# Assert that the variable `rezero_alpha` from ReZeroTransformer exists.
assert 'rezero_alpha' in ''.join([x.name for x in classifier_model.trainable_weights])
"""
Explanation: Customized Transformer
User can also override the hidden_cls argument in EncoderScaffold's constructor to employ a customized Transformer layer.
See ReZeroTransformer for how to implement a customized Transformer layer.
Following is an example of using ReZeroTransformer:
End of explanation
"""
# Keep the standard Transformer scaffold but replace only its attention layer.
# Use TalkingHeadsAttention
hidden_cfg = dict(default_hidden_cfg)
hidden_cfg['attention_cls'] = modeling.layers.TalkingHeadsAttention
kwargs = dict(default_kwargs)
kwargs['hidden_cls'] = modeling.layers.TransformerScaffold
kwargs['hidden_cfg'] = hidden_cfg
encoder = modeling.networks.EncoderScaffold(**kwargs)
classifier_model = build_classifier(encoder)
# ... Train the model ...
predict(classifier_model)
# Assert that the variable `pre_softmax_weight` from TalkingHeadsAttention exists.
assert 'pre_softmax_weight' in ''.join([x.name for x in classifier_model.trainable_weights])
"""
Explanation: Use TransformerScaffold
The above method of customizing Transformer requires rewriting the whole Transformer layer, while sometimes you may only want to customize either attention layer or feedforward block. In this case, TransformerScaffold can be used.
Customize Attention Layer
User can also override the attention_cls argument in TransformerScaffold's constructor to employ a customized Attention layer.
See TalkingHeadsAttention for how to implement a customized Attention layer.
Following is an example of using TalkingHeadsAttention:
End of explanation
"""
# Use GatedFeedforward (the original comment said TalkingHeadsAttention by
# mistake; this cell customizes the feedforward sub-layer, not attention).
hidden_cfg = dict(default_hidden_cfg)
hidden_cfg['feedforward_cls'] = modeling.layers.GatedFeedforward
kwargs = dict(default_kwargs)
kwargs['hidden_cls'] = modeling.layers.TransformerScaffold
kwargs['hidden_cfg'] = hidden_cfg
encoder_with_gated_feedforward = modeling.networks.EncoderScaffold(**kwargs)
classifier_model = build_classifier(encoder_with_gated_feedforward)
# ... Train the model ...
predict(classifier_model)
# Assert that the variable `gate` from GatedFeedforward exists.
assert 'gate' in ''.join([x.name for x in classifier_model.trainable_weights])
"""
Explanation: Customize Feedforward Layer
Similarly, one could also customize the feedforward layer.
See GatedFeedforward for how to implement a customized feedforward layer.
Following is an example of using GatedFeedforward.
End of explanation
"""
# Build an ALBERT-style encoder (cross-layer parameter sharing) from the
# same config, then reuse the classification head on top of it.
albert_encoder = modeling.networks.AlbertTransformerEncoder(**cfg)
classifier_model = build_classifier(albert_encoder)
# ... Train the model ...
predict(classifier_model)
"""
Explanation: Build a new Encoder using building blocks from KerasBERT.
Finally, you could also build a new encoder using building blocks in the modeling library.
See AlbertTransformerEncoder as an example:
End of explanation
"""
# Visualize the ALBERT encoder's topology.
tf.keras.utils.plot_model(albert_encoder, show_shapes=True, dpi=48)
"""
Explanation: Inspecting the albert_encoder, we see it stacks the same Transformer layer multiple times.
End of explanation
"""
|
SSQ/Coursera-UW-Machine-Learning-Classification | Programming Assignment 5/Implementing binary decision trees.ipynb | mit | loans = pd.read_csv('lending-club-data.csv')
loans.head(2)
# safe_loans = 1 => safe
# safe_loans = -1 => risky
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
#loans = loans.remove_column('bad_loans')
loans = loans.drop('bad_loans', axis=1)
features = ['grade', # grade of the loan
'term', # the term of the loan
'home_ownership', # home_ownership status: own, mortgage or rent
'emp_length', # number of years of employment
]
target = 'safe_loans'
loans = loans[features + [target]]
loans.iloc[122602]
"""
Explanation: 1. Load the dataset into a data frame named loans
End of explanation
"""
# Columns with dtype `object` hold strings and need one-hot encoding so the
# tree code below can treat every feature as a binary 0/1 column.
categorical_variables = []
for feat_name, feat_type in zip(loans.columns, loans.dtypes):
    if feat_type == object:
        categorical_variables.append(feat_name)
for feature in categorical_variables:
    # Replace each categorical column with its 0/1 indicator columns.
    loans_one_hot_encoded = pd.get_dummies(loans[feature],prefix=feature)
    #print loans_one_hot_encoded
    loans = loans.drop(feature, axis=1)
    for col in loans_one_hot_encoded.columns:
        loans[col] = loans_one_hot_encoded[col]
print loans.head(2)
print loans.columns
loans.iloc[122602]
# Row positions (not index labels) for the assignment's fixed train/test split.
with open('module-5-assignment-2-train-idx.json') as train_data_file:
    train_idx = json.load(train_data_file)
with open('module-5-assignment-2-test-idx.json') as test_data_file:
    test_idx = json.load(test_data_file)
print train_idx[:3]
print test_idx[:3]
print len(train_idx)
print len(test_idx)
train_data = loans.iloc[train_idx]
test_data = loans.iloc[test_idx]
print len(loans.dtypes )
"""
Explanation: One-hot encoding
End of explanation
"""
def intermediate_node_num_mistakes(labels_in_node):
    """Return how many labels a majority-class prediction would get wrong.

    Args:
        labels_in_node: array-like of +1/-1 labels at a tree node.

    Returns:
        The size of the minority class (0 for an empty node).
    """
    # Corner case: an empty node makes no mistakes.
    if len(labels_in_node) == 0:
        return 0
    num_safe = (labels_in_node == 1).sum()
    num_risky = (labels_in_node == -1).sum()
    # Predicting the majority misclassifies exactly the minority class.
    return num_risky if num_safe >= num_risky else num_safe
"""
Explanation: Decision tree implementation
Function to count number of mistakes while predicting majority class
Recall from the lecture that prediction at an intermediate node works by predicting the majority class for all data points that belong to this node. Now, we will write a function that calculates the number of misclassified examples when predicting the majority class. This will be used to help determine which feature is the best to split on at a given node of the tree.
Note: Keep in mind that in order to compute the number of mistakes for a majority classifier, we only need the label (y values) of the data points in the node.
Steps to follow:
Step 1: Calculate the number of safe loans and risky loans.
Step 2: Since we are assuming majority class prediction, all the data points that are not in the majority class are considered mistakes.
Step 3: Return the number of mistakes.
7. Now, let us write the function intermediate_node_num_mistakes which computes the number of misclassified examples of an intermediate node given the set of labels (y values) of the data points contained in the node. Your code should be analogous to
End of explanation
"""
# Sanity checks for intermediate_node_num_mistakes.  Each case has exactly 2
# minority-class labels, so the majority classifier makes 2 mistakes.
# Fixes: the failure message for test case 2 wrongly said "Test 3"; prints
# now use the single-argument print() form valid in both Python 2 and 3.
# Test case 1
example_labels = np.array([-1, -1, 1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
    print('Test passed!')
else:
    print('Test 1 failed... try again!')
# Test case 2
example_labels = np.array([-1, -1, 1, 1, 1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
    print('Test passed!')
else:
    print('Test 2 failed... try again!')
# Test case 3
example_labels = np.array([-1, -1, -1, -1, -1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
    print('Test passed!')
else:
    print('Test 3 failed... try again!')
"""
Explanation: 8. Because there are several steps in this assignment, we have introduced some stopping points where you can check your code and make sure it is correct before proceeding. To test your intermediate_node_num_mistakes function, run the following code until you get a Test passed!, then you should proceed. Otherwise, you should spend some time figuring out where things went wrong. Again, remember that this code is specific to SFrame, but using your software of choice, you can construct similar tests.
End of explanation
"""
def best_splitting_feature(data, features, target):
    """Return the feature whose binary split minimizes classification error.

    Args:
        data: pandas DataFrame whose feature columns are all 0/1.
        features: list of candidate feature column names.
        target: name of the +1/-1 label column.

    Returns:
        The name of the feature with the lowest error; ties are broken in
        favor of the feature that appears earliest in `features`.
    """
    num_data_points = float(len(data))
    best_feature = None
    # Error is always <= 1, so any value above 1 works as a sentinel.
    best_error = 10
    for candidate in features:
        # Partition rows by the candidate's 0/1 value.
        zeros_side = data[data[candidate] == 0]
        ones_side = data[data[candidate] == 1]
        # Majority-vote mistakes on each side, combined into one error rate.
        total_mistakes = (intermediate_node_num_mistakes(zeros_side[target]) +
                          intermediate_node_num_mistakes(ones_side[target]))
        error = total_mistakes / num_data_points
        # Strict < keeps the earliest feature on ties.
        if error < best_error:
            best_feature, best_error = candidate, error
    return best_feature
"""
Explanation: Function to pick best feature to split on
The function best_splitting_feature takes 3 arguments:
The data
The features to consider for splits (a list of strings of column names to consider for splits)
The name of the target/label column (string)
The function will loop through the list of possible features, and consider splitting on each of them. It will calculate the classification error of each split and return the feature that had the smallest classification error when split on.
Recall that the classification error is defined as follows:
9. Follow these steps to implement best_splitting_feature:
Step 1: Loop over each feature in the feature list
Step 2: Within the loop, split the data into two groups: one group where all of the data has feature value 0 or False (we will call this the left split), and one group where all of the data has feature value 1 or True (we will call this the right split). Make sure the left split corresponds with 0 and the right split corresponds with 1 to ensure your implementation fits with our implementation of the tree building process.
Step 3: Calculate the number of misclassified examples in both groups of data and use the above formula to compute the classification error.
Step 4: If the computed error is smaller than the best error found so far, store this feature and its error.
Note: Remember that since we are only dealing with binary features, we do not have to consider thresholds for real-valued features. This makes the implementation of this function much easier.
Your code should be analogous to
End of explanation
"""
def create_leaf(target_values):
    """Build a leaf node predicting the majority class of `target_values`.

    Args:
        target_values: array-like of +1/-1 labels reaching this leaf.

    Returns:
        A node dict with 'is_leaf' True, child pointers set to None, and
        'prediction' set to the majority label (ties go to -1).
    """
    positives = len(target_values[target_values == +1])
    negatives = len(target_values[target_values == -1])
    # Majority vote; a tie is treated as the risky (-1) class.
    majority = +1 if positives > negatives else -1
    return {'splitting_feature': None,
            'left': None,
            'right': None,
            'is_leaf': True,
            'prediction': majority}
"""
Explanation: Building the tree
With the above functions implemented correctly, we are now ready to build our decision tree. Each node in the decision tree is represented as a dictionary which contains the following keys and possible values:
10. First, we will write a function that creates a leaf node given a set of target values.
Your code should be analogous to
End of explanation
"""
def decision_tree_create(data, features, target, current_depth = 0, max_depth = 10):
    """Recursively grow a binary decision tree by greedy error minimization.

    Args:
        data: pandas DataFrame of 0/1 feature columns plus the label column.
        features: list of candidate feature column names to split on.
        target: name of the +1/-1 label column.
        current_depth: depth of this node (the root is 0).
        max_depth: create a leaf once this depth is reached.

    Returns:
        A nested node dict with keys 'is_leaf', 'prediction',
        'splitting_feature', 'left' and 'right'.
    """
    remaining_features = features[:] # Make a copy of the features.
    target_values = data[target]
    print "--------------------------------------------------------------------"
    print "Subtree, depth = %s (%s data points)." % (current_depth, len(target_values))
    # Stopping condition 1
    # (Check if there are mistakes at current node.
    # Recall you wrote a function intermediate_node_num_mistakes to compute this.)
    if intermediate_node_num_mistakes(target_values) == 0: ## YOUR CODE HERE
        print "Stopping condition 1 reached."
        # If no mistakes at current node, make current node a leaf node
        return create_leaf(target_values)
    # Stopping condition 2 (check if there are remaining features to consider splitting on)
    if remaining_features == []: ## YOUR CODE HERE
        print "Stopping condition 2 reached."
        # If there are no remaining features to consider, make current node a leaf node
        return create_leaf(target_values)
    # Additional stopping condition (limit tree depth)
    if current_depth >= max_depth: ## YOUR CODE HERE
        print "Reached maximum depth. Stopping for now."
        # If the max tree depth has been reached, make current node a leaf node
        return create_leaf(target_values)
    # Find the best splitting feature (recall the function best_splitting_feature implemented above)
    ## YOUR CODE HERE
    splitting_feature = best_splitting_feature(data, remaining_features, target)
    # Split on the best feature that we found.
    left_split = data[data[splitting_feature] == 0]
    right_split = data[data[splitting_feature] == 1] ## YOUR CODE HERE
    # NOTE: the chosen feature is removed before recursing, so neither
    # subtree may split on it again.
    remaining_features.remove(splitting_feature)
    print "Split on feature %s. (%s, %s)" % (\
        splitting_feature, len(left_split), len(right_split))
    # Create a leaf node if the split is "perfect"
    if len(left_split) == len(data):
        print "Creating leaf node."
        return create_leaf(left_split[target])
    if len(right_split) == len(data):
        print "Creating leaf node."
        ## YOUR CODE HERE
        return create_leaf(right_split[target])
    # Repeat (recurse) on left and right subtrees
    left_tree = decision_tree_create(left_split, remaining_features, target, current_depth + 1, max_depth)
    ## YOUR CODE HERE
    right_tree = decision_tree_create(right_split, remaining_features, target, current_depth + 1, max_depth)
    return {'is_leaf' : False,
            'prediction' : None,
            'splitting_feature': splitting_feature,
            'left' : left_tree,
            'right' : right_tree}
"""
Explanation: 11. Now, we will provide a Python skeleton of the learning algorithm. Note that this code is not complete; it needs to be completed by you if you are using Python. Otherwise, your code should be analogous to
Stopping condition 1: All data points in a node are from the same class.
Stopping condition 2: No more features to split on.
Additional stopping condition: In addition to the above two stopping conditions covered in lecture, in this assignment we will also consider a stopping condition based on the max_depth of the tree. By not letting the tree grow too deep, we will save computational effort in the learning process.
End of explanation
"""
# Build the feature list: every column except the target label.
input_features = train_data.columns
print list(input_features)
a = list(train_data.columns)
a.remove('safe_loans')
print a
print list(train_data.columns)
# Learn a depth-6 tree (may take 1-2 minutes on the full training set).
my_decision_tree = decision_tree_create(train_data, a, 'safe_loans', current_depth = 0, max_depth = 6)
"""
Explanation: 12. Train a tree model on the train_data. Limit the depth to 6 (max_depth = 6) to make sure the algorithm doesn't run for too long. Call this tree my_decision_tree. Warning: The tree may take 1-2 minutes to learn.
End of explanation
"""
def classify(tree, x, annotate = False):
    """Predict the +1/-1 label for one data point by walking the tree.

    Args:
        tree: node dict as produced by decision_tree_create.
        x: mapping (e.g. pandas Series or dict) from feature name to 0/1.
        annotate: if True, print each split taken along the way.

    Returns:
        The leaf's 'prediction' value (+1 or -1).

    Fix: the Python-2-only `print` statements now use the single-argument
    print(...) form, which behaves identically under Python 2 and 3.
    """
    # if the node is a leaf node.
    if tree['is_leaf']:
        if annotate:
            print("At leaf, predicting %s" % tree['prediction'])
        return tree['prediction']
    else:
        # split on feature: 0 goes left, 1 goes right.
        split_feature_value = x[tree['splitting_feature']]
        if annotate:
            print("Split on %s = %s" % (tree['splitting_feature'], split_feature_value))
        if split_feature_value == 0:
            return classify(tree['left'], x, annotate)
        else:
            return classify(tree['right'], x, annotate)
"""
Explanation: Making predictions with a decision tree
13. As discussed in the lecture, we can make predictions from the decision tree with a simple recursive function. Write a function called classify, which takes in a learned tree and a test point x to classify. Include an option annotate that describes the prediction path when set to True. Your code should be analogous to
End of explanation
"""
# Inspect the first test row and the tree's prediction for it.
print test_data.iloc[0]
print 'Predicted class: %s ' % classify(my_decision_tree, test_data.iloc[0])
"""
Explanation: 14. Now, let's consider the first example of the test set and see what my_decision_tree model predicts for this data point.
End of explanation
"""
# Same prediction, but with the decision path printed step by step.
classify(my_decision_tree, test_data.iloc[0], annotate=True)
"""
Explanation: 15. Let's add some annotations to our prediction to see what the prediction path was that lead to this predicted class:
End of explanation
"""
def evaluate_classification_error(tree, data, target='safe_loans'):
    """Return the fraction of rows in `data` that `tree` misclassifies.

    Args:
        tree: node dict as produced by decision_tree_create.
        data: pandas DataFrame with the tree's feature columns plus the
            label column.
        target: name of the +1/-1 label column.  Previously hard-coded to
            'safe_loans'; now a keyword with the same default, so existing
            callers are unaffected.

    Returns:
        Classification error as a float in [0, 1].
    """
    # Apply classify(tree, x) to each row to get a Series of predictions.
    predictions = data.apply(lambda x: classify(tree, x), axis=1)
    # Count disagreements with the true labels and normalize by row count.
    num_mistakes = (data[target] != np.array(predictions)).values.sum()
    return num_mistakes * 1. / len(data)
"""
Explanation: Quiz question:
What was the feature that my_decision_tree first split on while making the prediction for test_data[0]?
Quiz question:
What was the first feature that lead to a right split of test_data[0]?
Quiz question:
What was the last feature split on before reaching a leaf node for test_data[0]?
Answer:
term_36 months
Answer:
grade_D
Answer:
grade_D
Evaluating your decision tree
16. Now, we will write a function to evaluate a decision tree by computing the classification error of the tree on the given dataset. Write a function called evaluate_classification_error that takes in as input:
tree (as described above)
data (a data frame of data points)
This function should return a prediction (class label) for each row in data using the decision tree. Your code should be analogous to
End of explanation
"""
# Classification error of the learned tree on the held-out test set.
evaluate_classification_error(my_decision_tree, test_data)
"""
Explanation: 17. Now, use this function to evaluate the classification error on the test set.
End of explanation
"""
def print_stump(tree, name = 'root'):
    """Pretty-print a single decision stump (a node and its two children).

    Args:
        tree: node dict as produced by decision_tree_create.
        name: label shown above the stump (e.g. the parent's split feature).

    Returns:
        None.

    Fix: the Python-2-only `print` statements now use the single-argument
    print(...) form, which behaves identically under Python 2 and 3.
    """
    split_name = tree['splitting_feature'] # split_name is something like 'term. 36 months'
    if split_name is None:
        # A leaf has no split to draw; just report its label.
        print("(leaf, label: %s)" % tree['prediction'])
        return None
    # Split the one-hot column name into feature and value.  The pieces are
    # currently unused but kept so malformed names (no '_') still raise.
    split_feature, split_value = split_name.split('_',1)
    print(' %s' % name)
    print(' |---------------|----------------|')
    print(' | |')
    print(' | |')
    print(' | |')
    print(' [{0} == 0] [{0} == 1] '.format(split_name))
    print(' | |')
    print(' | |')
    print(' | |')
    print(' (%s) (%s)'
          % (('leaf, label: ' + str(tree['left']['prediction']) if tree['left']['is_leaf'] else 'subtree'),
             ('leaf, label: ' + str(tree['right']['prediction']) if tree['right']['is_leaf'] else 'subtree')))
"""
Explanation: Quiz Question:
Rounded to 2nd decimal point, what is the classification error of my_decision_tree on the test_data?
Answer:
0.38
Printing out a decision stump
18. As discussed in the lecture, we can print out a single decision stump (printing out the entire tree is left as an exercise to the curious reader). Here we provide Python code to visualize a decision stump. If you are using different software, make sure your code is analogous to:
End of explanation
"""
# Show the root split of the learned tree.
print_stump(my_decision_tree)
"""
Explanation: 19. Using this function, we can print out the root of our decision tree:
End of explanation
"""
# Walk the tree: left subtree, its left child, then the right side.
print_stump(my_decision_tree['left'], my_decision_tree['splitting_feature'])
print_stump(my_decision_tree['left']['left'], my_decision_tree['left']['splitting_feature'])
print_stump(my_decision_tree['right'], my_decision_tree['splitting_feature'])
print_stump(my_decision_tree['right']['right'], my_decision_tree['right']['splitting_feature'])
"""
Explanation: Quiz Question:
What is the feature that is used for the split at the root node?
Answer:
term_ 36 months
Exploring the intermediate left subtree
The tree is a recursive dictionary, so we do have access to all the nodes! We can use
my_decision_tree['left'] to go left
my_decision_tree['right'] to go right
20. We can print out the left subtree by running the code
End of explanation
"""
|
Kaggle/learntools | notebooks/bqml/raw/ex1.ipynb | apache-2.0 | # Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.bqml.ex1 import *
# Set your own project id here
PROJECT_ID = ____ # a string, like 'kaggle-bigquery-240818'
from google.cloud import bigquery
client = bigquery.Client(project=PROJECT_ID, location="US")
dataset = client.create_dataset('model_dataset', exists_ok=True)
from google.cloud.bigquery import magics
from kaggle.gcp import KaggleKernelCredentials
magics.context.credentials = KaggleKernelCredentials()
magics.context.project = PROJECT_ID
"""
Explanation: Stocking rental bikes
You stock bikes for a bike rental company in Austin, ensuring stations have enough bikes for all their riders. You decide to build a model to predict how many riders will start from each station during each hour, capturing patterns in seasonality, time of day, day of the week, etc.
To get started, create a project in GCP and connect to it by running the code cell below. Make sure you have connected the kernel to your GCP account in Settings.
End of explanation
"""
# Uncomment the following line to check the solution once you've thought about the answer
# q_1.solution()
"""
Explanation: Linear Regression
Your dataset is quite large. BigQuery is especially efficient with large datasets, so you'll use BigQuery-ML (called BQML) to build your model. BQML uses a "linear regression" model when predicting numeric outcomes, like the number of riders.
1) Training vs testing
You'll want to test your model on data it hasn't seen before (for reasons described in the Intro to Machine Learning Micro-Course. What do you think is a good approach to splitting the data? What data should we use to train, what data should we use for test the model?
End of explanation
"""
# Write your query to retrieve the training data
query = ____
# Create the query job. No changes needed below this line
query_job = client.query(query)
# API request - run the query, and return DataFrame. No changes needed
model_data = query_job.to_dataframe()
q_2.check()
# uncomment the lines below to get a hint or solution
# q_2.hint()
# q_2.solution()
## My solution code
query = """
SELECT start_station_name,
TIMESTAMP_TRUNC(start_time, HOUR) as start_hour,
COUNT(bikeid) as num_rides
FROM `bigquery-public-data.austin_bikeshare.bikeshare_trips`
WHERE start_time < "2018-01-01"
GROUP BY start_station_name, start_hour
"""
query_job = client.query(query)
model_data = query_job.to_dataframe()
"""
Explanation: Training data
First, you'll write a query to get the data for model-building. You can use the public Austin bike share dataset from the bigquery-public-data.austin_bikeshare.bikeshare_trips table. You predict the number of rides based on the station where the trip starts and the hour when the trip started. Use the TIMESTAMP_TRUNC function to truncate the start time to the hour.
2) Exercise: Query the training data
Write the query to retrieve your training data. The fields should be:
1. The start_station_name
2. A time trips start, to the nearest hour. Get this with TIMESTAMP_TRUNC(start_time, HOUR) as start_hour
3. The number of rides starting at the station during the hour. Call this num_rides.
Select only the data before 2018-01-01 (so we can save data from 2018 as testing data.)
End of explanation
"""
model_data.head(20)
"""
Explanation: You'll want to inspect your data to ensure it looks like what you expect. Run the line below to get a quick view of the data, and feel free to explore it more if you'd like (if you don't know how to do that, the Pandas micro-course might be helpful).
End of explanation
"""
# Write your query to create and train the model
query = ____
# Create the query job. No changes needed below this line
query_job = client.query(query)
# API request - run the query. Models return an empty table. No changes needed
query_job.result()
## My solution
query = """
CREATE OR REPLACE MODEL `model_dataset.bike_trips`
OPTIONS(model_type='linear_reg',
input_label_cols=['num_rides'],
optimize_strategy='batch_gradient_descent') AS
SELECT COUNT(bikeid) as num_rides,
start_station_name,
TIMESTAMP_TRUNC(start_time, HOUR) as start_hour
FROM `bigquery-public-data.austin_bikeshare.bikeshare_trips`
WHERE start_time < "2018-01-01"
GROUP BY start_station_name, start_hour
"""
query_job = client.query(query)
# API request - run the query. Models return an empty table
query_job.result()
q_3.check()
# q_3.solution()
"""
Explanation: Model creation
Now it's time to turn this data into a model. You'll use the CREATE MODEL statement that has a structure like:
sql
CREATE OR REPLACE MODEL`model_dataset.bike_trips`
OPTIONS(model_type='linear_reg',
input_label_cols=['label_col'],
optimize_strategy='batch_gradient_descent') AS
-- training data query goes here
SELECT ...
FROM ...
WHERE ...
GROUP BY ...
The model_type and optimize_strategy shown here are good parameters to use in general for predicting numeric outcomes with BQML.
Tip: Using CREATE OR REPLACE MODEL rather than just CREATE MODEL ensures you don't get an error if you want to run this command again without first deleting the model you've created.
3) Exercise: Create and train the model
Below, write your query to create and train a linear regression model on the training data.
End of explanation
"""
# Write your query to evaluate the model
query = "____"
query_job = client.query(query)
# API request - run the query
evaluation_results = query_job.to_dataframe()
evaluation_results
q_4.check()
## My solution
query = """
SELECT *
FROM
ML.EVALUATE(MODEL `model_dataset.bike_trips`, (
SELECT COUNT(bikeid) as num_rides,
start_station_name,
TIMESTAMP_TRUNC(start_time, HOUR) as start_hour
FROM `bigquery-public-data.austin_bikeshare.bikeshare_trips`
WHERE start_time >= "2018-01-01"
GROUP BY start_station_name, start_hour
))
"""
query_job = client.query(query)
# API request - run the query
evaluation_results = query_job.to_dataframe()
evaluation_results
"""
Explanation: 4) Exercise: Model evaluation
Now that you have a model, evaluate its performance on data from 2018. If you need help, refer to the documentation for the ML.EVALUATE function.
End of explanation
"""
## Thought question answer here
"""
Explanation: You should see that the r^2 score here is negative. Negative values indicate that the model is worse than just predicting the mean rides for each example.
5) Theories for poor performance
Why would your model be doing worse than making the most simple prediction?
Answer: It's possible there's something broken in the model algorithm. Or the data for 2018 is much different than the historical data before it.
End of explanation
"""
# Write the query here
query = "____"
query_job = client.query(query)
# API request - run the query
evaluation_results = query_job.to_dataframe()
evaluation_results
## My solution
query = """
SELECT AVG(ROUND(predicted_num_rides)) as predicted_avg_riders,
AVG(num_rides) as true_avg_riders
FROM
ML.PREDICT(MODEL `model_dataset.bike_trips`, (
SELECT COUNT(bikeid) as num_rides,
start_station_name,
TIMESTAMP_TRUNC(start_time, HOUR) as start_hour
FROM `bigquery-public-data.austin_bikeshare.bikeshare_trips`
WHERE start_time >= "2018-01-01"
AND start_station_name = "22nd & Pearl"
GROUP BY start_station_name, start_hour
))
-- ORDER BY start_hour
"""
query_job = client.query(query)
# API request - run the query
evaluation_results = query_job.to_dataframe()
evaluation_results
"""
Explanation: 6) Exercise: Looking at predictions
A good way to figure out where your model is going wrong is to look closer at a small set of predictions. Use your model to predict the number of rides for the 22nd & Pearl station in 2018. Compare the mean values of predicted vs actual riders.
End of explanation
"""
# Write the query here
query = "____"
# Create the query job
query_job = ____
# API request - run the query and return a pandas DataFrame
evaluation_results = ____
evaluation_results
## My solution
query = """
WITH daily_rides AS (
SELECT COUNT(bikeid) AS num_rides,
start_station_name,
EXTRACT(DAYOFYEAR from start_time) AS doy,
EXTRACT(YEAR from start_time) AS year
FROM `bigquery-public-data.austin_bikeshare.bikeshare_trips`
GROUP BY start_station_name, doy, year
ORDER BY year
),
station_averages AS (
SELECT avg(num_rides) AS avg_riders, start_station_name, year
FROM daily_rides
GROUP BY start_station_name, year)
SELECT avg(avg_riders) AS daily_rides_per_station, year
FROM station_averages
GROUP BY year
ORDER BY year
"""
query_job = client.query(query)
# API request - run the query
evaluation_results = query_job.to_dataframe()
evaluation_results
"""
Explanation: What you should see here is that the model is underestimating the number of rides by quite a bit.
7) Exercise: Average daily rides per station
Either something is wrong with the model or something surprising is happening in the 2018 data.
What could be happening in the data? Write a query to get the average number of riders per station for each year in the dataset and order by the year so you can see the trend. You can use the EXTRACT method to get the day and year from the start time timestamp.
End of explanation
"""
## Thought question answer here
"""
Explanation: 8) What do your results tell you?
Given the daily average riders per station over the years, does it make sense that the model is failing?
Answer: The daily average riders went from around 10 in 2017 to over 16 in 2018. This change in the bikesharing program caused your model to underestimate the number of riders in 2018. Unexpected things can happen when you predict the future in an ever-changing area. Knowledge of a topic can be helpful here, and if you knew enough about the program, you might be able to predict (or at least explain) these types of changes over time.
End of explanation
"""
# Write your query to create and train the model
query = "____"
# Create the query job
query_job = ____ # Your code goes here
# API request - run the query. Models return an empty table
____ # Your code goes here
## My solution
query = """
CREATE OR REPLACE MODEL `model_dataset.bike_trips_2017`
OPTIONS(model_type='linear_reg',
input_label_cols=['num_rides'],
optimize_strategy='batch_gradient_descent') AS
SELECT COUNT(bikeid) as num_rides,
start_station_name,
TIMESTAMP_TRUNC(start_time, HOUR) as start_hour
FROM `bigquery-public-data.austin_bikeshare.bikeshare_trips`
WHERE start_time < "2017-01-01"
GROUP BY start_station_name, start_hour
"""
query_job = client.query(query)
# API request - run the query. Models return an empty table
query_job.result()
"""
Explanation: 9) A Better Scenario
It's disappointing that your model was so inaccurate on 2018 data. Fortunately, this issue of the world changing over time is the exception rather than the rule.
Your model was built on data that went through the end of 2016. So you can also see how the model performs on data from 2017. First, create a model
End of explanation
"""
# Write your query to evaluate the model
query = "____"
query_job = client.query(query)
# API request - run the query. Models return an empty table
query_job.result()
query = """
SELECT *
FROM
ML.EVALUATE(MODEL `model_dataset.bike_trips_2017`, (
SELECT COUNT(bikeid) as num_rides,
start_station_name,
TIMESTAMP_TRUNC(start_time, HOUR) as start_hour
FROM `bigquery-public-data.austin_bikeshare.bikeshare_trips`
WHERE start_time >= "2017-01-01" AND start_time < "2018-01-01"
GROUP BY start_station_name, start_hour
))
"""
query_job = client.query(query)
# API request - run the query
evaluation_results = query_job.to_dataframe()
evaluation_results
"""
Explanation: Now write the query to evaluate your model using data from 2017
End of explanation
"""
|
vikashvverma/machine-learning | mladvanced/Project/Capstone/kernel.ipynb | mit | # This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
from glob import glob
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
from keras.utils import np_utils
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input/flowers/flowers"))
# Any results you write to the current directory are saved as output.
"""
Explanation: Steps
This kernel is designed in following ways:
Step 1: Data Preprocessing
Step 2: Develop a Benchmark model
Step 3: Develop a CNN architecture from scratch
Step 4: Develop a CNN using Transfer Learning
<a id='step1'></a>
Step 1: Data Preprocessing
Import Libraries
Here we import a set of useful libraries
End of explanation
"""
# Make a parent directory `data` and three sub directories `train`, `valid` and 'test'
%rm -rf data # Remove if already present
%mkdir -p data/train/daisy
%mkdir -p data/train/tulip
%mkdir -p data/train/sunflower
%mkdir -p data/train/rose
%mkdir -p data/train/dandelion
%mkdir -p data/valid/daisy
%mkdir -p data/valid/tulip
%mkdir -p data/valid/sunflower
%mkdir -p data/valid/rose
%mkdir -p data/valid/dandelion
%mkdir -p data/test/daisy
%mkdir -p data/test/tulip
%mkdir -p data/test/sunflower
%mkdir -p data/test/rose
%mkdir -p data/test/dandelion
%ls data/train
%ls data/valid
%ls data/test
"""
Explanation: Reorganize the data
All the flowers are stored in a directory flower and separated based on the category in sub-directory.
We can reorganize the data in such a way that we can easily use load_files from sklearn.
The flowers are present in dataset as follows:
flowers
│
└───Daisy
│
└───Dandelion
|
└───Rose
│
└───Sunflower
|
└───Tulip
We can create dataset for training, validation and testing to easily use load_files from sklearn
data
│
└───train
| │
| └───Daisy
| │
| └───Dandelion
| |
| └───Rose
| │
| └───Sunflower
| |
| └───Tulip
└───valid
| │
| └───Daisy
| │
| └───Dandelion
| |
| └───Rose
| │
| └───Sunflower
| |
| └───Tulip
└───test
│
└───Daisy
│
└───Dandelion
|
└───Rose
│
└───Sunflower
|
└───Tulip
End of explanation
"""
base_dir = "../input/flowers/flowers"
categories = os.listdir(base_dir)
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random
from shutil import copyfile
plt.rcParams["figure.figsize"] = (20,3)
def train_valid_test(files):
    """Split a list of files into training, validation and test sets.

    The split is 60% / 20% / 20%, taken in order from the input list,
    so the caller is expected to shuffle the list beforehand.

    Args:
        files: sequence of file names (or any sliceable sequence).

    Returns:
        A (train_files, valid_files, test_files) tuple of sub-sequences.
    """
    n = len(files)  # hoisted: used for every slice boundary below
    train_files = files[:int(n * 0.6)]
    valid_files = files[int(n * 0.6):int(n * 0.8)]
    test_files = files[int(n * 0.8):]
    return train_files, valid_files, test_files
def copy_files(files, src, dest):
    """Copy each named file from the *src* directory into *dest*."""
    for name in files:
        source_path = "{}/{}".format(src, name)
        target_path = "{}/{}".format(dest, name)
        copyfile(source_path, target_path)
def plot_images(category, images):
    """Show up to five sample images from the given flower category."""
    for position, filename in enumerate(images, start=1):
        plt.subplot(1, 5, position)
        plt.title(category)
        img = mpimg.imread("{}/{}/{}".format(base_dir, category, filename))
        plt.imshow(img)
    plt.show()
# Shuffle, filter and split each category's images, then copy them into the
# train/valid/test directory layout created above.
total_images = []
for category in categories:
    images = os.listdir("{}/{}".format(base_dir, category))
    # Shuffle before splitting so each split is a random sample of the category.
    random.shuffle(images)
    # Drop the helper scripts that ship alongside the images in this dataset.
    filtered_images = [image for image in images if image not in ['flickr.py', 'flickr.pyc', 'run_me.py']]
    total_images.append(len(filtered_images))
    train_images, valid_images, test_images = train_valid_test(filtered_images)
    copy_files(train_images, "{}/{}".format(base_dir, category), "./data/train/{}".format(category))
    copy_files(valid_images, "{}/{}".format(base_dir, category), "./data/valid/{}".format(category))
    copy_files(test_images, "{}/{}".format(base_dir, category), "./data/test/{}".format(category))
    # NOTE(review): this uses the unfiltered `images` list, so a stray .py file
    # could be passed to plot_images -- confirm this is intended.
    plot_images(category, images[:5])
"""
Explanation: Find all the categories of the flowers
End of explanation
"""
# Report per-category counts and draw a bar chart to check class balance.
print("Total images: {}".format(np.sum(total_images)))
for i in range(len(categories)):
    print("{}: {}".format(categories[i], total_images[i]))
y_pos = np.arange(len(categories))
plt.bar(y_pos, total_images, width=0.2,color='b',align='center')
plt.xticks(y_pos, categories)
plt.ylabel("Image count")
plt.title("Image count in different categories")
plt.show()
"""
Explanation: Statistics of flowers
End of explanation
"""
# define function to load train, valid and test datasets
def load_dataset(path, num_classes=5):
    """Load image file paths and one-hot labels from a directory tree.

    Args:
        path: root directory whose sub-directories are the class labels
            (the layout produced by the data-reorganization step above).
        num_classes: number of categories for the one-hot encoding;
            defaults to 5 to match this flower dataset.

    Returns:
        A (files, targets) tuple: a numpy array of file paths and a
        one-hot target matrix of shape (n_samples, num_classes).
    """
    data = load_files(path)
    flower_files = np.array(data['filenames'])
    # Print the discovered class names so the label ordering is visible.
    print(data['target_names'])
    flower_targets = np_utils.to_categorical(np.array(data['target']), num_classes)
    return flower_files, flower_targets
# load train, test, and validation datasets
train_files, train_targets = load_dataset('data/train')
valid_files, valid_targets = load_dataset('data/valid')
test_files, test_targets = load_dataset('data/test')
print('There are %d total flower categories.' % len(categories))
print('There are %s total flower images.\n' % len(np.hstack([train_files, valid_files, test_files])))
print('There are %d training flower images.' % len(train_files))
print('There are %d validation flower images.' % len(valid_files))
print('There are %d test flower images.' % len(test_files))
"""
Explanation: Observations
There are 4323 total images with approximately similar distribution in each category.
The dataset does not seem to be imbalanced.
Accuracy can be used as a metric for model evaluation.
End of explanation
"""
from keras.preprocessing import image
from tqdm import tqdm
"""
Explanation: Data Transformation
Keras' CNNs require a 4D tensor as input with the shape as (nb_samples, rows, columns, channels) where
- nb_samples: total number of samples or images
- rows: number of rows of each image
- columns: number of columns of each image
- channels: number of channels of each image
End of explanation
"""
def path_to_tensor(img_path):
    """Load one image from disk as a 4D tensor of shape (1, 224, 224, 3)."""
    # Load as a PIL image, resized to the 224x224 input size.
    pil_image = image.load_img(img_path, target_size=(224, 224))
    # Convert to a (224, 224, 3) float array.
    img_array = image.img_to_array(pil_image)
    # Prepend a batch axis so the result can be fed to a Keras model.
    return img_array[np.newaxis, ...]
"""
Explanation: Create a 4D tensor
The path_to_tensor function below takes a color image as input and returns a 4D tensor suitable for supplying to a Keras CNN. The function first loads the image and then resizes it to 224x224 pixels. The image is then converted to an array and reshaped into a 4D tensor. The returned tensor will always have a shape of (1, 224, 224, 3) as we are dealing with a single image only in this function.
End of explanation
"""
def paths_to_tensor(img_paths):
    """Convert many image paths into a single (n, 224, 224, 3) tensor."""
    # tqdm wraps the iterable to show a progress bar while loading.
    per_image_tensors = list(map(path_to_tensor, tqdm(img_paths)))
    return np.vstack(per_image_tensors)
"""
Explanation: The paths_to_tensor function applies path_to_tensor to every image path and stacks the results into a single 4D tensor.
End of explanation
"""
from PIL import ImageFile
# Allow PIL to load images whose files are truncated instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# pre-process the data for Keras: scale pixel values from [0, 255] to [0, 1]
train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255
"""
Explanation: Pre-process the Data
Rescale the images by dividing every pixel in every image by 255.
End of explanation
"""
# Benchmark model: a single conv layer followed by global pooling and softmax.
simple_model = Sequential()
print(train_tensors.shape)
### Define the architecture of the simple model.
simple_model.add(Conv2D(filters=16, kernel_size=2, strides=1, activation='relu', input_shape=(224,224,3)))
# Global average pooling collapses each feature map to one value.
simple_model.add(GlobalAveragePooling2D())
# Final softmax over the 5 flower categories.
simple_model.add(Dense(5, activation='softmax'))
simple_model.summary()
"""
Explanation: <a id="step2"></a>
Step 2: Develop a Benchmark model
Use a simple CNN to create a benchmark model.
End of explanation
"""
simple_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Create a `saved_models` directory for saving best model
%mkdir -p saved_models
from keras.callbacks import ModelCheckpoint
### number of epochs
epochs = 50
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.simple.hdf5',
verbose=1, save_best_only=True)
simple_model.fit(train_tensors, train_targets,
validation_data=(valid_tensors, valid_targets),
epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
simple_model.load_weights('saved_models/weights.best.simple.hdf5')
# get index of predicted flower category for each image in test set
flower_predictions = [np.argmax(simple_model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# report test accuracy
test_accuracy = 100*np.sum(np.array(flower_predictions)==np.argmax(test_targets, axis=1))/len(flower_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
"""
Explanation: Making Predictions with the simple model
End of explanation
"""
model = Sequential()
print(train_tensors.shape)
### Define architecture.
model.add(Conv2D(filters=16, kernel_size=2, strides=1, activation='relu', input_shape=(224,224,3)))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Conv2D(filters=32, kernel_size=2, strides=1, activation='relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Conv2D(filters=64, kernel_size=2, strides=1, activation='relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(GlobalAveragePooling2D())
model.add(Dense(5, activation='softmax'))
model.summary()
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
from keras.callbacks import ModelCheckpoint
### number of epochs
epochs = 50
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets,
validation_data=(valid_tensors, valid_targets),
epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
"""
Explanation: Benchmark model's performance
The accuracy obtained from the benchmark model is 41.57%.
<a id="step3"></a>
Step 3: Develop a CNN architecture from scratch
End of explanation
"""
model.load_weights('saved_models/weights.best.from_scratch.hdf5')
"""
Explanation: Load best weight of the model
End of explanation
"""
# get index of predicted flower category for each image in test set
flower_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# report test accuracy
test_accuracy = 100*np.sum(np.array(flower_predictions)==np.argmax(test_targets, axis=1))/len(flower_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
"""
Explanation: Get the accuracy of the model
End of explanation
"""
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from keras.models import Model
# Load InceptionResNetV2 pre-trained on ImageNet, without its classifier head.
inception_resnet = InceptionResNetV2(weights="imagenet",include_top=False, input_shape=(224,224,3))
# NOTE(review): only the first 5 layers are frozen here; the rest of the base
# network remains trainable -- confirm this is the intended fine-tuning depth.
for layer in inception_resnet.layers[:5]:
    layer.trainable = False
# Attach a new classifier head: flatten -> dense -> dropout -> dense -> softmax.
output_model = inception_resnet.output
output_model = Flatten()(output_model)
output_model = Dense(200, activation='relu')(output_model)
output_model = Dropout(0.5)(output_model)
output_model = Dense(200, activation='relu')(output_model)
output_model = Dense(5, activation='softmax')(output_model)
model = Model(inputs=inception_resnet.input, outputs=output_model)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
from keras.callbacks import ModelCheckpoint
### number of epochs
epochs = 50
# Save only the weights that achieve the best validation loss.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.inception_resnetv2.hdf5',
                               verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets,
          validation_data=(valid_tensors, valid_targets),
          epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
"""
Explanation: <a id="step4"></a>
Step 4: Develop a CNN using Transfer Learning
End of explanation
"""
### load best weights
model.load_weights('saved_models/weights.best.inception_resnetv2.hdf5')
"""
Explanation: Load the best weight of the model
End of explanation
"""
# get index of predicted flower category for each image in test set
flower_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# report test accuracy
test_accuracy = 100*np.sum(np.array(flower_predictions)==np.argmax(test_targets, axis=1))/len(flower_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
for i in range(5):
predicted = np.argmax(model.predict(np.expand_dims(test_tensors[i], axis=0)))
actual = np.argmax(test_targets[i])
print("Predicted: {}, Actual: {}, Name: {}".format(predicted, actual, test_files[i].split("/")[2]))
image = mpimg.imread(test_files[i])
plt.imshow(image)
plt.show()
"""
Explanation: Get the accuracy on test set
End of explanation
"""
%rm -rf data
"""
Explanation: Delete created directory and files. It's necessary to have only few files otherwise Kaggle won't allow to commit a kernel.
End of explanation
"""
|
lewisamarshall/ionize | new_tutorial.ipynb | gpl-2.0 | # Setup
from __future__ import print_function, absolute_import, division
import ionize
import pprint
from matplotlib import pyplot as plot
%matplotlib inline
import numpy as np
np.set_printoptions(precision=3)
"""
Explanation: ionize Tutorial
ionize is a Python module for calculating the properties of ions and electrolyte solutions.
End of explanation
"""
acid = ionize.Ion('myAcid', [-1], [5], [-25e-9])
base = ionize.Ion('myBase', [1], [8], [20e-9])
print(acid) # The string includes only the class and name.
print(repr(base)) # The representation contains enough information to reconstruct the ion.
"""
Explanation: Ions
The basic building block of ionize is a single ionic species. Small ions (as opposed to ion complexes or polyions) are represented by the Ion class.
An Ion has the following properties that can be set on initialization.
name
valence (as an interable of integers)
reference_pKa (as an iterable of floats)
reference_mobility (as an interable of floats, in m^2/V/s)
reference_temperature, the temperature at which other parameters are measured, in degC (default = 25 degC)
enthalpy, the change in enthalpy on ionization, as an interable, in J/mol/K
heat_capacity, the change in heat_capacity on ionization, as an iterable, in J/mol/K^2
molecular_weight, in Daltons
alias, an iterable of alternate names
nightingale_data, data fitting change in mobility with temperature
Only the name, valence, pKa, and mobility are required parameters. All other parameters are optional.
End of explanation
"""
print('myAcid Ka at (I=0 M) =', acid.acidity())
print('myAcid Ka at (I=0.5 M) =', acid.acidity(ionic_strength=0.5))
pH = np.linspace(0,14)
for I in [None, 0., 0.001, 0.01, 0.1]:
mu = [base.mobility(p, I) for p in pH]
if I is not None:
label = 'I={} M'.format(I)
else:
label = 'I=None'
plot.plot(pH, mu, label=label)
plot.xlabel('pH'); plot.xlim(0, 14)
plot.ylabel('effective mobility (m^2/v/s)'); plot.ylim(-.1e-8, 2.1e-8)
plot.legend()
plot.show()
"""
Explanation: The guranteed interfaces of ion species are:
- charge(pH, ionic_strength, temperature)
- Return the charge per ion at the specified condition
- mobility(pH, ionic_strength, temperature)
- Return the effective mobility in m^2/V/s at the specified conditions
- diffusivity(pH, ionic_strength, temperature)
- Return the diffusivity in m^2/s for the ion
- molar_conductivity(pH, ionic_strength, temperature)
- Return the molar conductivity in S/M
- separability(other, pH, ionic_strength, temperature)
- Return the separability between the ion and another ion at the specified condition.
- serialize(nested, compact)
- Return a serialized JSON representation.
- save(filename)
- Save the ion to a file in JSON format.
In addition, BaseIon subclasses usually have some unique interfaces.
End of explanation
"""
db = ionize.Database()
histidine = db['histidine']
print(repr(histidine))
for ionic_strength in (None, 0):
mu_histidine = [histidine.mobility(p, ionic_strength=ionic_strength) for p in pH]
plot.plot(pH, mu_histidine, label="I={}".format(ionic_strength))
plot.xlabel('pH'); plot.xlim([0, 14])
plot.ylabel('effective mobility (m^2/v/s)')
plot.legend()
plot.show()
"""
Explanation: Note the difference between ionic_strength parameters here. If ionic_strength is 0, the numerical value of 0 is used in each calculation. However, it is impossible to have a solution of pH 0 with ionic_strength of 0.
When the default value of None is used for ionic_strength, ionize uses the minimum ionic strength at the selected pH.
ionize database
Individually initializing ions is error-prone and time-consuming. To simplify the process, load ions from
the database by initializing the database, and accessing the database like a dictionary.
End of explanation
"""
print("Search results for 'amino'\n--------------------------")
pprint.pprint(db.search('amino'))
print("\nSearch results for 'chloric'\n----------------------------")
pprint.pprint(db.search('chloric'))
print("\nSearch results for 'per'\n------------------------")
pprint.pprint(db.search('per'))
print('\nOh, copper is what I was looking for.')
print(db.load('copper'))
"""
Explanation: search()
You can also search for ions in the database by name using search() method of Database. search() will return a list of ion names, so load the ion when you find what you want.
End of explanation
"""
print(len(db.data), 'ions in database.')
"""
Explanation: Other db functions
You can get the database data as a dictionary using the data attribute.
You can get the database data as a dictionary using the data method.
End of explanation
"""
buffer=ionize.Solution([db['tris'], db['chloride']], [0.1, 0.085])
print('pH =', buffer.pH)
print('I =', buffer.ionic_strength, 'M')
print('conductivity =', buffer.conductivity(), 'S/m')
print('buffering capacity =', buffer.buffering_capacity(), 'M')
print('debye length =', buffer.debye(), 'm')
"""
Explanation: Solution
Getting the properties of a single ionic species in solution is useful, but the real challenge of dealing with aqueous solutions of ions is finding properties based on the equilibrium state of multiple ionic species. ionize can perform those calculations using the Solution class. Solution objects are initialized using ionize.Solution(ions, concentrations), where ions is a list of Ion objects and concentration is a list concentrations of the ions, with concentrations in molar.
End of explanation
"""
sol = ionize.Solution(['bis-tris', 'acetic acid'], [0.1, 0.03])
print([ion.name for ion in sol.ions])
print(sol.concentration('acetic acid'))
"""
Explanation: Solutions can be initialized with ion names instead of ion objects.
End of explanation
"""
# Sweep HCl concentration from 0 to 0.2 M against a fixed 0.1 M Tris buffer
# and record the resulting equilibrium pH at each point.
c_tris = 0.1
c_hcl = np.linspace(0.0, 0.2, 50)
t_pH = [ionize.Solution(['tris', 'hydrochloric acid'], [c_tris, c_h]).pH for c_h in c_hcl]
# Plot pH versus the HCl:Tris concentration ratio (the titration curve).
plot.plot(c_hcl/c_tris, t_pH)
plot.xlabel('[HCl]/[Tris]')
plot.ylabel('pH')
plot.show()
"""
Explanation: We can iterate through solutions to calculate the pH of a titration between two ions.
End of explanation
"""
water = ionize.Solution()
print('I =', water.ionic_strength, 'M')
print('pH =', water.pH)
print('conductivity =', water.conductivity(), 'S/m')
"""
Explanation: A Solution can also be initialized without ions, e.g. as water.
End of explanation
"""
print('Stock:', buffer)
dilution = 0.5 * buffer + 0.5 * water
print('Dilution:', dilution)
"""
Explanation: A Solution can also be added and multiplied. This can be useful when calculating the results of diltuions, as below.
End of explanation
"""
buff = ionize.Solution(['tris'], 0.1)
print(buff.titrate('hydrochloric acid', 8.2))
print(buff.titrate('hydrochloric acid', 3))
print(buff.conductivity())
print(repr(buff.titrate('hydrochloric acid', 3, titration_property = 'conductivity')))
print(repr(buff.titrate('hydrochloric acid', 8)))
"""
Explanation: Solutions can be titrated to a specified pH. To do so, make a solution, and then specify a titrant, a property, and a target.
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.12.2/examples/notebooks/generated/mixed_lm_example.ipynb | bsd-3-clause | %matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.tools.sm_exceptions import ConvergenceWarning
"""
Explanation: Linear Mixed Effects Models
End of explanation
"""
data = sm.datasets.get_rdataset('dietox', 'geepack').data
md = smf.mixedlm("Weight ~ Time", data, groups=data["Pig"])
mdf = md.fit(method=["lbfgs"])
print(mdf.summary())
"""
Explanation: Note: The R code and the results in this notebook has been converted to markdown so that R is not required to build the documents. The R results in the notebook were computed using R 3.5.1 and lme4 1.1.
ipython
%load_ext rpy2.ipython
ipython
%R library(lme4)
array(['lme4', 'Matrix', 'tools', 'stats', 'graphics', 'grDevices',
'utils', 'datasets', 'methods', 'base'], dtype='<U9')
Comparing R lmer to statsmodels MixedLM
The statsmodels implementation of linear mixed models (MixedLM) closely follows the approach outlined in Lindstrom and Bates (JASA 1988). This is also the approach followed in the R package LME4. Other packages such as Stata, SAS, etc. should also be consistent with this approach, as the basic techniques in this area are mostly mature.
Here we show how linear mixed models can be fit using the MixedLM procedure in statsmodels. Results from R (LME4) are included for comparison.
Here are our import statements:
Growth curves of pigs
These are longitudinal data from a factorial experiment. The outcome variable is the weight of each pig, and the only predictor variable we will use here is "time". First we fit a model that expresses the mean weight as a linear function of time, with a random intercept for each pig. The model is specified using formulas. Since the random effects structure is not specified, the default random effects structure (a random intercept for each group) is automatically used.
End of explanation
"""
md = smf.mixedlm("Weight ~ Time", data, groups=data["Pig"], re_formula="~Time")
mdf = md.fit(method=["lbfgs"])
print(mdf.summary())
"""
Explanation: Here is the same model fit in R using LMER:
ipython
%%R
data(dietox, package='geepack')
ipython
%R print(summary(lmer('Weight ~ Time + (1|Pig)', data=dietox)))
```
Linear mixed model fit by REML ['lmerMod']
Formula: Weight ~ Time + (1 | Pig)
Data: dietox
REML criterion at convergence: 4809.6
Scaled residuals:
Min 1Q Median 3Q Max
-4.7118 -0.5696 -0.0943 0.4877 4.7732
Random effects:
Groups Name Variance Std.Dev.
Pig (Intercept) 40.39 6.356
Residual 11.37 3.371
Number of obs: 861, groups: Pig, 72
Fixed effects:
Estimate Std. Error t value
(Intercept) 15.72352 0.78805 19.95
Time 6.94251 0.03339 207.94
Correlation of Fixed Effects:
(Intr)
Time -0.275
```
Note that in the statsmodels summary of results, the fixed effects and random effects parameter estimates are shown in a single table. The random effect for animal is labeled "Intercept RE" in the statsmodels output above. In the LME4 output, this effect is the pig intercept under the random effects section.
There has been a lot of debate about whether the standard errors for random effect variance and covariance parameters are useful. In LME4, these standard errors are not displayed, because the authors of the package believe they are not very informative. While there is good reason to question their utility, we elected to include the standard errors in the summary table, but do not show the corresponding Wald confidence intervals.
Next we fit a model with two random effects for each animal: a random intercept, and a random slope (with respect to time). This means that each pig may have a different baseline weight, as well as growing at a different rate. The formula specifies that "Time" is a covariate with a random coefficient. By default, formulas always include an intercept (which could be suppressed here using "0 + Time" as the formula).
End of explanation
"""
.294 / (19.493 * .416)**.5
md = smf.mixedlm("Weight ~ Time", data, groups=data["Pig"],
re_formula="~Time")
free = sm.regression.mixed_linear_model.MixedLMParams.from_components(np.ones(2),
np.eye(2))
mdf = md.fit(free=free, method=["lbfgs"])
print(mdf.summary())
"""
Explanation: Here is the same model fit using LMER in R:
ipython
%R print(summary(lmer("Weight ~ Time + (1 + Time | Pig)", data=dietox)))
```
Linear mixed model fit by REML ['lmerMod']
Formula: Weight ~ Time + (1 + Time | Pig)
Data: dietox
REML criterion at convergence: 4434.1
Scaled residuals:
Min 1Q Median 3Q Max
-6.4286 -0.5529 -0.0416 0.4841 3.5624
Random effects:
Groups Name Variance Std.Dev. Corr
Pig (Intercept) 19.493 4.415
Time 0.416 0.645 0.10
Residual 6.038 2.457
Number of obs: 861, groups: Pig, 72
Fixed effects:
Estimate Std. Error t value
(Intercept) 15.73865 0.55012 28.61
Time 6.93901 0.07982 86.93
Correlation of Fixed Effects:
(Intr)
Time 0.006
```
The random intercept and random slope are only weakly correlated $(0.294 / \sqrt{19.493 * 0.416} \approx 0.1)$. So next we fit a model in which the two random effects are constrained to be uncorrelated:
End of explanation
"""
data = sm.datasets.get_rdataset("Sitka", "MASS").data
endog = data["size"]
data["Intercept"] = 1
exog = data[["Intercept", "Time"]]
"""
Explanation: The likelihood drops by 0.3 when we fix the correlation parameter to 0. Comparing 2 x 0.3 = 0.6 to the chi^2 1 df reference distribution suggests that the data are very consistent with a model in which this parameter is equal to 0.
Here is the same model fit using LMER in R (note that here R is reporting the REML criterion instead of the likelihood, where the REML criterion is twice the log likelihood):
ipython
%R print(summary(lmer("Weight ~ Time + (1 | Pig) + (0 + Time | Pig)", data=dietox)))
```
Linear mixed model fit by REML ['lmerMod']
Formula: Weight ~ Time + (1 | Pig) + (0 + Time | Pig)
Data: dietox
REML criterion at convergence: 4434.7
Scaled residuals:
Min 1Q Median 3Q Max
-6.4281 -0.5527 -0.0405 0.4840 3.5661
Random effects:
Groups Name Variance Std.Dev.
Pig (Intercept) 19.8404 4.4543
Pig.1 Time 0.4234 0.6507
Residual 6.0282 2.4552
Number of obs: 861, groups: Pig, 72
Fixed effects:
Estimate Std. Error t value
(Intercept) 15.73875 0.55444 28.39
Time 6.93899 0.08045 86.25
Correlation of Fixed Effects:
(Intr)
Time -0.086
```
Sitka growth data
This is one of the example data sets provided in the LMER R library. The outcome variable is the size of the tree, and the covariate used here is a time value. The data are grouped by tree.
End of explanation
"""
md = sm.MixedLM(endog, exog, groups=data["tree"], exog_re=exog["Intercept"])
mdf = md.fit()
print(mdf.summary())
"""
Explanation: Here is the statsmodels LME fit for a basic model with a random intercept. We are passing the endog and exog data directly to the LME init function as arrays. Also note that endog_re is specified explicitly in argument 4 as a random intercept (although this would also be the default if it were not specified).
End of explanation
"""
exog_re = exog.copy()
md = sm.MixedLM(endog, exog, data["tree"], exog_re)
mdf = md.fit()
print(mdf.summary())
"""
Explanation: Here is the same model fit in R using LMER:
ipython
%R
data(Sitka, package="MASS")
print(summary(lmer("size ~ Time + (1 | tree)", data=Sitka)))
```
Linear mixed model fit by REML ['lmerMod']
Formula: size ~ Time + (1 | tree)
Data: Sitka
REML criterion at convergence: 164.8
Scaled residuals:
Min 1Q Median 3Q Max
-2.9979 -0.5169 0.1576 0.5392 4.4012
Random effects:
Groups Name Variance Std.Dev.
tree (Intercept) 0.37451 0.612
Residual 0.03921 0.198
Number of obs: 395, groups: tree, 79
Fixed effects:
Estimate Std. Error t value
(Intercept) 2.2732443 0.0878955 25.86
Time 0.0126855 0.0002654 47.80
Correlation of Fixed Effects:
(Intr)
Time -0.611
```
We can now try to add a random slope. We start with R this time. From the code and output below we see that the REML estimate of the variance of the random slope is nearly zero.
ipython
%R print(summary(lmer("size ~ Time + (1 + Time | tree)", data=Sitka)))
```
Linear mixed model fit by REML ['lmerMod']
Formula: size ~ Time + (1 + Time | tree)
Data: Sitka
REML criterion at convergence: 153.4
Scaled residuals:
Min 1Q Median 3Q Max
-2.7609 -0.5173 0.1188 0.5270 3.5466
Random effects:
Groups Name Variance Std.Dev. Corr
tree (Intercept) 2.217e-01 0.470842
Time 3.288e-06 0.001813 -0.17
Residual 3.634e-02 0.190642
Number of obs: 395, groups: tree, 79
Fixed effects:
Estimate Std. Error t value
(Intercept) 2.273244 0.074655 30.45
Time 0.012686 0.000327 38.80
Correlation of Fixed Effects:
(Intr)
Time -0.615
convergence code: 0
Model failed to converge with max|grad| = 0.793203 (tol = 0.002, component 1)
Model is nearly unidentifiable: very large eigenvalue
- Rescale variables?
```
If we run this in statsmodels LME with defaults, we see that the variance estimate is indeed very small, which leads to a warning about the solution being on the boundary of the parameter space. The regression slopes agree very well with R, but the likelihood value is much higher than that returned by R.
End of explanation
"""
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
likev = mdf.profile_re(0, 're', dist_low=0.1, dist_high=0.1)
"""
Explanation: We can further explore the random effects structure by constructing plots of the profile likelihoods. We start with the random intercept, generating a plot of the profile likelihood from 0.1 units below to 0.1 units above the MLE. Since each optimization inside the profile likelihood generates a warning (due to the random slope variance being close to zero), we turn off the warnings here.
End of explanation
"""
# Plot the profile likelihood of the random INTERCEPT variance (component 0
# of the RE covariance), computed above via mdf.profile_re(0, 're', ...).
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 8))
plt.plot(likev[:, 0], 2 * likev[:, 1])
# BUG FIX: this cell profiles the random intercept, not the slope (the slope
# profile is produced in a later cell), so the x-axis label must say intercept.
plt.xlabel("Variance of random intercept", size=17)
plt.ylabel("-2 times profile log likelihood", size=17)
"""
Explanation: Here is a plot of the profile likelihood function. We multiply the log-likelihood difference by 2 to obtain the usual $\chi^2$ reference distribution with 1 degree of freedom.
End of explanation
"""
# Estimated variance of the random slope: element (1, 1) of the RE covariance.
re = mdf.cov_re.iloc[1, 1]
with warnings.catch_warnings():
    # Parameter is often on the boundary
    warnings.simplefilter("ignore", ConvergenceWarning)
    # Profile component 1 (the random slope) from 0.5*re to 1.8*re.
    likev = mdf.profile_re(1, 're', dist_low=.5*re, dist_high=0.8*re)

plt.figure(figsize=(10, 8))
# Multiply by 2 so the curve can be read against a chi^2(1) reference.
plt.plot(likev[:,0], 2*likev[:,1])
plt.xlabel("Variance of random slope", size=17)
lbl = plt.ylabel("-2 times profile log likelihood", size=17)
"""
Explanation: Here is a plot of the profile likelihood function. The profile likelihood plot shows that the MLE of the random slope variance parameter is a very small positive number, and that there is low uncertainty in this estimate.
End of explanation
"""
|
Naereen/notebooks | Generer_des_fausses_citations_latines_du_Roi_Loth.ipynb | mit | citation = citation_aleatoire(italic=True)
display(Markdown("> {}".format(citation)))
"""
Explanation: Table of Contents
<p><div class="lev1 toc-item"><a href="#Générer-des-fausses-citations-latines-du-Roi-Loth,-avec-Python,-Wikiquote-et-des-chaînes-de-Markov" data-toc-modified-id="Générer-des-fausses-citations-latines-du-Roi-Loth,-avec-Python,-Wikiquote-et-des-chaînes-de-Markov-1"><span class="toc-item-num">1 </span>Générer des fausses citations latines du Roi Loth, avec Python, Wikiquote et des chaînes de Markov</a></div><div class="lev2 toc-item"><a href="#Dépendances" data-toc-modified-id="Dépendances-11"><span class="toc-item-num">1.1 </span>Dépendances</a></div><div class="lev2 toc-item"><a href="#Récupérer-et-nettoyer-les-données" data-toc-modified-id="Récupérer-et-nettoyer-les-données-12"><span class="toc-item-num">1.2 </span>Récupérer et nettoyer les données</a></div><div class="lev2 toc-item"><a href="#Exploration-de-chaînes-de-Markov-pour-la-génération-aléatoire" data-toc-modified-id="Exploration-de-chaînes-de-Markov-pour-la-génération-aléatoire-13"><span class="toc-item-num">1.3 </span>Exploration de chaînes de Markov pour la génération aléatoire</a></div><div class="lev2 toc-item"><a href="#Fausses-locutions-latines" data-toc-modified-id="Fausses-locutions-latines-14"><span class="toc-item-num">1.4 </span>Fausses locutions latines</a></div><div class="lev2 toc-item"><a href="#Fausses-citations-du-Roi-Loth" data-toc-modified-id="Fausses-citations-du-Roi-Loth-15"><span class="toc-item-num">1.5 </span>Fausses citations du Roi Loth</a></div><div class="lev3 toc-item"><a href="#Premier-exemple" data-toc-modified-id="Premier-exemple-151"><span class="toc-item-num">1.5.1 </span>Premier exemple</a></div><div class="lev3 toc-item"><a href="#Exemples" data-toc-modified-id="Exemples-152"><span class="toc-item-num">1.5.2 </span>Exemples</a></div><div class="lev3 toc-item"><a href="#Générer-aléatoirement-les-métadonnées-de-l'épisode" data-toc-modified-id="Générer-aléatoirement-les-métadonnées-de-l'épisode-153"><span class="toc-item-num">1.5.3 </span>Générer aléatoirement les 
métadonnées de l'épisode</a></div><div class="lev3 toc-item"><a href="#Générer-aléatoirement-les-explications-foireuses-du-Roi-Loth" data-toc-modified-id="Générer-aléatoirement-les-explications-foireuses-du-Roi-Loth-154"><span class="toc-item-num">1.5.4 </span>Générer aléatoirement les explications foireuses du Roi Loth</a></div><div class="lev3 toc-item"><a href="#Combiner-le-tout-!" data-toc-modified-id="Combiner-le-tout-!-155"><span class="toc-item-num">1.5.5 </span>Combiner le tout !</a></div><div class="lev3 toc-item"><a href="#Exemples" data-toc-modified-id="Exemples-156"><span class="toc-item-num">1.5.6 </span>Exemples</a></div><div class="lev3 toc-item"><a href="#Joli-affichage" data-toc-modified-id="Joli-affichage-157"><span class="toc-item-num">1.5.7 </span>Joli affichage</a></div><div class="lev3 toc-item"><a href="#Conclusion" data-toc-modified-id="Conclusion-158"><span class="toc-item-num">1.5.8 </span>Conclusion</a></div>
# Générer des fausses citations latines du Roi Loth, avec Python, Wikiquote et des chaînes de Markov
J'aimerai montrer ici comment générer des fausses citations latines, dignes du [Roi Loth](https://fr.wikipedia.org/wiki/Personnages_de_Kaamelott#Loth_d%E2%80%99Orcanie) de [Kaamelott](https://fr.wikiquote.org/wiki/Kaamelott), avec Python, des données extraites de [sa page Wikiquote](https://fr.wikiquote.org/wiki/Kaamelott/Loth) et des [chaînes de Markov](https://github.com/jilljenn/markov.py).
> Cf. [ce ticket](https://github.com/Naereen/notebooks/issues/13) pour l'idée initiale.
Exemple de sortie :
End of explanation
"""
%load_ext watermark
%watermark -v -m -a "Lilian Besson (Naereen)" -p lea -g
import os
import random
from string import ascii_lowercase
from collections import Counter, defaultdict
"""
Explanation: Dépendances
End of explanation
"""
from lea import Lea
"""
Explanation: Le module lea sera très pratique pour manipuler les probabilités pour les chaînes de Markov.
End of explanation
"""
%%bash
wget --no-verbose "https://en.wikipedia.org/wiki/List_of_Latin_phrases_(full)" -O /tmp/latin.html
grep -o '<b>[^<]*</b>' /tmp/latin.html | sed s_'</\?b>'_''_g | sort | uniq | sort | uniq > /tmp/data_latin.txt
!head data/latin.txt
"""
Explanation: Récupérer et nettoyer les données
J'ai utilisé cette page Wikipédia et deux lignes de Bash :
End of explanation
"""
!head data/latin.txt
!ls -larth data/latin.txt
!wc data/latin.txt
"""
Explanation: Ensuite il faut un peu de nettoyage pour enlever les lignes qui ont été incorrectement ajoutées dans le fichier (j'ai fait ça à la main).
End of explanation
"""
def markov(corpus, start, length):
    """Random walk of `length` words over the word-bigram chain of `corpus`.

    Builds a successor-frequency table from every sentence of the corpus,
    turns each row into a Lea discrete distribution, then samples a path of
    `length` words beginning at `start`.  Raises KeyError if the walk reaches
    a word that never has a successor in the corpus.
    """
    # successors[w][w2] = number of times the bigram "w w2" occurs.
    successors = defaultdict(Counter)
    for sentence in corpus:
        tokens = sentence.split()
        for current, following in zip(tokens, tokens[1:]):
            successors[current][following] += 1
    # One frequency-weighted Lea distribution per observed word.
    transition = {w: Lea.fromValFreqsDict(counts)
                  for w, counts in successors.items()}
    # Sample the walk, one successor at a time.
    path = [start]
    while len(path) < length:
        path.append(transition[path[-1]].random())
    return path
"""
Explanation: On a 1571 citations latines, c'est déjà un corpus conséquent !
Exploration de chaînes de Markov pour la génération aléatoire
J'utilise cette fonction markov écrite par Jill-Jênn Vie.
End of explanation
"""
corpus = [
'je mange des cerises',
'je mange des bananes',
'je conduis des camions',
]
start = 'je'
length = 4
"""
Explanation: Par exemple :
End of explanation
"""
for _ in range(3):
words = markov(corpus, start, length)
print(' '.join(words))
"""
Explanation: Et on peut générer 3 phrases aléatoires :
End of explanation
"""
WORD_LIST = "data/latin.txt"
corpus = open(WORD_LIST).readlines()
print("Exemple d'une citation :", corpus[0])
print("Il y a", len(corpus), "citations.")
starts = [c.split()[0] for c in corpus]
start = random.choice(starts)
print("Exemple d'un mot de début de citation :", start)
print("Il y a", len(starts), "mots de débuts de citations.")
proba_title = len([1 for s in starts if s.istitle()]) / len(starts)
print("Il y a {:.3%} chance de commencer une citation par une majuscule.".format(proba_title))
"""
Explanation: Fausses locutions latines
On va extraire le corpus, la liste des premiers mots, et la probabilité qu'un mot en début de citation commence par une majuscule.
End of explanation
"""
proba_title = 1
"""
Explanation: Mais en fait, le Roi Loth commence toujours ses citations latines par une majuscule :
End of explanation
"""
length_min = 3
length_max = 6
"""
Explanation: On va générer des locutions de 3 à 6 mots :
End of explanation
"""
def markov_try_while_failing(corpus, starts, length_min, length_max, proba_title,
                             nb_max_trial=100):
    """Generate one random sentence from the Markov chain, retrying on dead ends.

    The chain raises KeyError when it reaches a word that never appears as a
    predecessor in the corpus; in that case a fresh start word and sentence
    length are drawn and generation is attempted again, up to `nb_max_trial`
    times.  With probability `proba_title` the first word is title-cased.

    Returns the list of words; raises ValueError if every attempt failed.
    """
    for _ in range(nb_max_trial):
        # Fresh random start word and sentence length for every attempt.
        start = random.choice(starts)
        length = random.randint(length_min, length_max)
        try:
            words = markov(corpus, start, length)
        except KeyError:
            # Dead end in the chain: retry with new parameters.
            continue
        if random.random() <= proba_title:
            words[0] = words[0].title()
        return words
    raise ValueError("Echec")
"""
Explanation: On a bientôt ce qu'il faut pour générer une locution latine aléatoire.
Il arrive que la chaîne de Markov se bloque, donc on va juste essayer plusieurs fois avec des débuts différents.
End of explanation
"""
for _ in range(10):
words = markov_try_while_failing(corpus, starts, length_min, length_max, proba_title)
print(' '.join(words))
"""
Explanation: On peut essayer :
End of explanation
"""
# Real Kaamelott episode references the fake quotes will be attributed to.
episodes = [
    "Livre III, L’Assemblée des rois 2e partie, écrit par Alexandre Astier.",
    "Livre III, L’Assemblée des rois 2e partie, écrit par Alexandre Astier.", # présent deux fois
    "Livre IV, Le désordre et la nuit, écrit par Alexandre Astier.",
    "Livre V, Misère noire, écrit par Alexandre Astier.",
    "Livre VI, Arturus Rex, écrit par Alexandre Astier.",
    "Livre VI, Lacrimosa, écrit par Alexandre Astier."
]

def metadonnee_aleatoire(episodes=episodes):
    """Return the attribution line for a randomly chosen episode reference."""
    return "D'après François Rollin, inspiré par Kaamelott, " + random.choice(episodes)
"""
Explanation: Ça a déjà l'air pas mal latin !
Fausses citations du Roi Loth
Pour générer une citation du Roi Loth, il ne suffit pas d'avoir des locutions latines.
Il faut le contexte, l'explication, une fausse citation d'un épisode de Kaamelott etc...
Premier exemple
Ecouter celle là : Misa brevis, et spiritus maxima.
<audio src="data/tres_en_colere.mp3" controls="controls">Your browser does not support the audio element.</audio>
Exemples
Ave Cesar, rosae rosam, et spiritus rex ! Ah non, parce que là, j’en ai marre !
-- François Rollin, Kaamelott, Livre III, L’Assemblée des rois 2e partie, écrit par Alexandre Astier.
Tempora mori, tempora mundis recorda. Voilà. Eh bien ça, par exemple, ça veut absolument rien dire, mais l’effet reste le même, et pourtant j’ai jamais foutu les pieds dans une salle de classe attention !
-- François Rollin, Kaamelott, Livre III, L’Assemblée des rois 2e partie, écrit par Alexandre Astier.
Victoriae mundis et mundis lacrima. Bon, ça ne veut absolument rien dire, mais je trouve que c’est assez dans le ton.
-- François Rollin, Kaamelott, Livre IV, Le désordre et la nuit, écrit par Alexandre Astier.
Misa brevis et spiritus maxima, ça veut rien dire, mais je suis très en colère contre moi-même.
-- François Rollin, Kaamelott, Livre V, Misère noire, écrit par Alexandre Astier.
Deus minimi placet : seul les dieux décident.
-- François Rollin, Kaamelott, Livre VI, Arturus Rex, écrit par Alexandre Astier.
"Mundi placet et spiritus minima", ça n'a aucun sens mais on pourrait très bien imaginer une traduction du type : "Le roseau plie, mais ne cède... qu'en cas de pépin" ce qui ne veut rien dire non plus.
-- François Rollin, Kaamelott, Livre VI, Lacrimosa, écrit par Alexandre Astier.
Générer aléatoirement les métadonnées de l'épisode
C'est facile.
End of explanation
"""
# King Loth's bogus "explanations", taken from the show.
# NOTE: each string carries its own leading punctuation (". ", ", ", " : ")
# so it can be concatenated directly after the latin locution.
explications = [
    ". Ah non, parce que là, j’en ai marre !",
    ". Voilà. Eh bien ça, par exemple, ça veut absolument rien dire, mais l’effet reste le même, et pourtant j’ai jamais foutu les pieds dans une salle de classe attention !",
    ". Bon, ça ne veut absolument rien dire, mais je trouve que c’est assez dans le ton.",
    ", ça veut rien dire, mais je suis très en colère contre moi-même.",
    " : seul les dieux décident.",
    """, ça n'a aucun sens mais on pourrait très bien imaginer une traduction du type : "Le roseau plie, mais ne cède... qu'en cas de pépin", ce qui ne veut rien dire non plus.""",
]
"""
Explanation: Générer aléatoirement les explications foireuses du Roi Loth
C'est moins facile... Mais sans chercher à être parfait, on va juste prendre une explication parmi celles qui existent :
End of explanation
"""
# Extra hand-written variations, in the same format as the originals
# (leading punctuation included in every string).
explications += [
    ". Ah non, parce qu'au bout d'un moment, zut !",
    ". Voilà, ça ne veut rien dire, mais c'est assez dans le ton !",
    ". Bon, ça n'a aucun sens, mais j'aime bien ce petit ton décalé.",
    ". Le latin, ça impressionne ! Surtout les grouillots.",
    ", ça n'a aucun sens, mais je suis très en colère contre moi-même.",
    ", ça n'a aucun sens, mais je fais ça par amour.",
    " : la victoire par la sagesse.",
    " : les livres contiennent la sagesse des anciens.",
    " : à Rome seul compte le pouvoir.",
    " : seul les puissants agissent.",
    " : le mariage est une bénédiction.",
    " : ça veut rien dire, mais ça impressionne !",
    """, ça veut rien dire mais on pourrait très bien imaginer une traduction du type : "Le vent tourne pour ceux qui savent écouter", ce qui ne veut rien dire non plus.""",
    """, ça n'a aucun sens mais pourquoi pas une traduction du genre : "Les imbéciles dorment, les forts agissent mais dorment aussi", ce qui n'a aucun sens non plus.""",
]
def explication_aleatoire():
    """Return one random bogus explanation from the global `explications` list."""
    return random.choice(explications)
"""
Explanation: Et quelques variations :
End of explanation
"""
def citation_aleatoire(italic=False):
    """Assemble one fake King Loth quote: latin locution, explanation, source.

    When `italic` is True the locution is wrapped in Markdown emphasis marks.
    """
    metadonnee = metadonnee_aleatoire()
    explication = explication_aleatoire()
    locution = ' '.join(
        markov_try_while_failing(corpus, starts, length_min, length_max, proba_title))
    template = '"*{}*"{} -- {}' if italic else '"{}"{} -- {}'
    return template.format(locution, explication, metadonnee)
"""
Explanation: Combiner le tout !
C'est très facile :
End of explanation
"""
for _ in range(10):
print(">", citation_aleatoire(italic=True))
"""
Explanation: Exemples
End of explanation
"""
from IPython.display import display, Markdown
for _ in range(10):
citation = citation_aleatoire(italic=True)
display(Markdown("> {}".format(citation)))
"""
Explanation: Joli affichage
End of explanation
"""
|
lesley2958/lesley2958.github.io | blog/2018/denoising.ipynb | mit | import numpy as np
import math, random
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(0)
"""
Explanation: Signal denoising using RNNs in PyTorch
In this post, I'll use PyTorch to create a simple Recurrent Neural Network (RNN) for denoising a signal. I started learning RNNs using PyTorch. However, I felt that many of the examples were fairly complex. So, here's an attempt to create a simple educational example.
Problem description
Given a noisy sine wave as an input, we want to estimate the denoised signal. This is shown in the figure below.
Customary imports
End of explanation
"""
def sine(X, signal_freq=60.):
    """Clean sine wave sampled at positions X, with period `signal_freq` samples."""
    angular = 2 * np.pi * X
    return np.sin(angular / signal_freq)
def noisy(Y, noise_range=(-0.35, 0.35)):
    """Corrupt Y with additive noise drawn uniformly from `noise_range`."""
    lo, hi = noise_range
    return Y + np.random.uniform(lo, hi, size=Y.shape)
def sample(sample_size):
    """Draw one (noisy, clean) sine-wave pair of length `sample_size`.

    A random integer phase offset makes each sample start at a different
    point of the wave.  Returns (noisy_input, clean_target).
    """
    offset = random.randint(0, sample_size)
    clean = sine(np.arange(sample_size) + offset)
    return noisy(clean), clean
"""
Explanation: Creating noisy and denoised signals
Let's now write functions to cerate a sine wave, add some noise on top of it. This way we're able to create a noisy verison of the sine wave.
End of explanation
"""
inp, out = sample(100)
plt.plot(inp, label='Noisy')
plt.plot(out, label ='Denoised')
plt.legend()
"""
Explanation: Let's now invoke the functions we defined to generate the figure we saw in the problem description.
End of explanation
"""
def create_dataset(n_samples=10000, sample_size=100):
    """Stack `n_samples` (noisy, clean) pairs into two (n_samples, sample_size) arrays."""
    noisy_rows, clean_rows = [], []
    for _ in range(n_samples):
        noisy_sig, clean_sig = sample(sample_size)
        noisy_rows.append(noisy_sig)
        clean_rows.append(clean_sig)
    return np.array(noisy_rows), np.array(clean_rows)
"""
Explanation: Creating dataset
Now, let's write a simple function to generate a dataset of such noisy and denoised samples.
End of explanation
"""
data_inp, data_out = create_dataset()
train_inp, train_out = data_inp[:8000], data_out[:8000]
test_inp, test_out = data_inp[8000:], data_out[8000:]
import torch
import torch.nn as nn
from torch.autograd import Variable
"""
Explanation: Now, creating the dataset, and dividing it into train and test set.
End of explanation
"""
# Model hyper-parameters: univariate signal, 30 hidden units, one RNN layer.
input_dim = 1
hidden_size = 30
num_layers = 1

class CustomRNN(nn.Module):
    """Sequence-to-sequence denoiser: single RNN layer plus per-step linear head.

    Input and output are both shaped (batch, seq_len, 1) since the RNN is
    built with batch_first=True; the final Tanh keeps predictions in [-1, 1],
    matching the range of a clean sine wave.
    """
    def __init__(self, input_size, hidden_size, output_size):
        super(CustomRNN, self).__init__()
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)
        self.act = nn.Tanh()

    def forward(self, x):
        # Passing None lets the RNN start from a zero hidden state.
        features, _ = self.rnn(x, None)
        squashed = self.act(self.linear(features))
        # Reshape to (batch, seq_len, 1) to mirror the input layout.
        return squashed.view(features.data.shape[0], -1, 1)

r = CustomRNN(input_dim, hidden_size, 1)
r
"""
Explanation: Creating RNN
We have 1d sine waves, which we want to denoise. Thus, we have input dimension of 1. Let's create a simple 1-layer RNN with 30 hidden units.
End of explanation
"""
# Storing predictions per iterations to visualise later
predictions = []
optimizer = torch.optim.Adam(r.parameters(), lr=1e-2)
# L1 (mean absolute error) between predicted and clean signals.
loss_func = nn.L1Loss()
# Full-batch training: every iteration feeds the entire training set.
for t in range(301):
    # NOTE(review): `hidden` is assigned but never used — the model passes
    # None internally; this line looks like leftover scaffolding.
    hidden = None
    # Reshape to (batch, seq_len, 1) and wrap in Variable (legacy pre-0.4
    # PyTorch API; on modern PyTorch plain tensors would suffice).
    inp = Variable(torch.Tensor(train_inp.reshape((train_inp.shape[0], -1, 1))), requires_grad=True)
    out = Variable(torch.Tensor(train_out.reshape((train_out.shape[0], -1, 1))) )
    pred = r(inp)
    optimizer.zero_grad()
    # Snapshot this iteration's predictions for the animation later on.
    predictions.append(pred.data.numpy())
    loss = loss_func(pred, out)
    if t%20==0:
        print(t, loss.data[0])
    loss.backward()
    optimizer.step()
"""
Explanation: Training
End of explanation
"""
t_inp = Variable(torch.Tensor(test_inp.reshape((test_inp.shape[0], -1, 1))), requires_grad=True)
pred_t = r(t_inp)
# Test loss
print(loss_func(pred_t, Variable(torch.Tensor(test_out.reshape((test_inp.shape[0], -1, 1))))).data[0])
"""
Explanation: Great. As expected, the loss reduces over time.
Generating prediction on test set
End of explanation
"""
sample_num = 23
plt.plot(pred_t[sample_num].data.numpy(), label='Pred')
plt.plot(test_out[sample_num], label='GT')
plt.legend()
plt.title("Sample num: {}".format(sample_num))
"""
Explanation: Visualising sample denoising
End of explanation
"""
# Toggle for a bidirectional RNN; the linear head must then consume
# hidden_size * num_directions features per time step.
bidirectional = True
if bidirectional:
    num_directions = 2
else:
    num_directions = 1

class CustomRNN(nn.Module):
    """Denoiser variant: (optionally bidirectional) RNN with dropout, linear head, Tanh."""
    def __init__(self, input_size, hidden_size, output_size):
        super(CustomRNN, self).__init__()
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size,
                          batch_first=True, bidirectional=bidirectional, dropout=0.1)
        self.linear = nn.Linear(hidden_size * num_directions, output_size)
        self.act = nn.Tanh()

    def forward(self, x):
        # Zero initial hidden state; output concatenates both directions.
        features, _ = self.rnn(x, None)
        squashed = self.act(self.linear(features))
        # Back to (batch, seq_len, 1) to mirror the input layout.
        return squashed.view(features.data.shape[0], -1, 1)
r= CustomRNN(input_dim, hidden_size, 1)
r
# Storing predictions per iterations to visualise later
predictions = []
optimizer = torch.optim.Adam(r.parameters(), lr=1e-2)
loss_func = nn.L1Loss()
for t in range(301):
hidden = None
inp = Variable(torch.Tensor(train_inp.reshape((train_inp.shape[0], -1, 1))), requires_grad=True)
out = Variable(torch.Tensor(train_out.reshape((train_out.shape[0], -1, 1))) )
pred = r(inp)
optimizer.zero_grad()
predictions.append(pred.data.numpy())
loss = loss_func(pred, out)
if t%20==0:
print(t, loss.data[0])
loss.backward()
optimizer.step()
t_inp = Variable(torch.Tensor(test_inp.reshape((test_inp.shape[0], -1, 1))), requires_grad=True)
pred_t = r(t_inp)
# Test loss
print(loss_func(pred_t, Variable(torch.Tensor(test_out.reshape((test_inp.shape[0], -1, 1))))).data[0])
sample_num = 23
plt.plot(pred_t[sample_num].data.numpy(), label='Pred')
plt.plot(test_out[sample_num], label='GT')
plt.legend()
plt.title("Sample num: {}".format(sample_num))
"""
Explanation: Bidirectional RNN
Seems reasonably neat to me! If only the first few points were better esimtated. Any idea why they're not? Maybe, we need a bidirectional RNN? Let's try one, and I'll also add dropout to prevent overfitting.
End of explanation
"""
# Toggle for a bidirectional GRU; the linear head must then consume
# hidden_size * num_directions features per time step.
bidirectional = True
if bidirectional:
    num_directions = 2
else:
    num_directions = 1

class CustomRNN(nn.Module):
    """Denoiser variant: (optionally bidirectional) GRU with dropout, linear head, Tanh."""
    def __init__(self, input_size, hidden_size, output_size):
        super(CustomRNN, self).__init__()
        self.rnn = nn.GRU(input_size=input_size, hidden_size=hidden_size,
                          batch_first=True, bidirectional=bidirectional, dropout=0.1)
        self.linear = nn.Linear(hidden_size * num_directions, output_size)
        self.act = nn.Tanh()

    def forward(self, x):
        # Zero initial hidden state; output concatenates both directions.
        features, _ = self.rnn(x, None)
        squashed = self.act(self.linear(features))
        # Back to (batch, seq_len, 1) to mirror the input layout.
        return squashed.view(features.data.shape[0], -1, 1)
r= CustomRNN(input_dim, hidden_size, 1)
r
# Storing predictions per iterations to visualise later
predictions = []
optimizer = torch.optim.Adam(r.parameters(), lr=1e-2)
loss_func = nn.L1Loss()
for t in range(201):
hidden = None
inp = Variable(torch.Tensor(train_inp.reshape((train_inp.shape[0], -1, 1))), requires_grad=True)
out = Variable(torch.Tensor(train_out.reshape((train_out.shape[0], -1, 1))) )
pred = r(inp)
optimizer.zero_grad()
predictions.append(pred.data.numpy())
loss = loss_func(pred, out)
if t%20==0:
print(t, loss.data[0])
loss.backward()
optimizer.step()
t_inp = Variable(torch.Tensor(test_inp.reshape((test_inp.shape[0], -1, 1))), requires_grad=True)
pred_t = r(t_inp)
# Test loss
print(loss_func(pred_t, Variable(torch.Tensor(test_out.reshape((test_inp.shape[0], -1, 1))))).data[0])
sample_num = 23
plt.plot(pred_t[sample_num].data.numpy(), label='Pred')
plt.plot(test_out[sample_num], label='GT')
plt.legend()
plt.title("Sample num: {}".format(sample_num))
"""
Explanation: Hmm. The estimated signal looks better for the initial few points, but it gets worse for the final few points. Oops! I guess the reverse RNN now causes problems for its own first few points (which are the sequence's final points)!
From RNNs to GRU
Let's now replace our RNN with GRU to see if the model improves.
End of explanation
"""
# Point matplotlib's animation writer at a local ffmpeg binary.
plt.rcParams['animation.ffmpeg_path'] = './ffmpeg'
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots(figsize=(4, 3))
fig.set_tight_layout(True)

# Query the figure's on-screen size and DPI. Note that when saving the figure to
# a file, we need to provide a DPI for that separately.
print('fig size: {0} DPI, size in inches {1}'.format(
    fig.get_dpi(), fig.get_size_inches()))

def update(i):
    """Redraw the axes with the prediction snapshot recorded at iteration i."""
    label = 'Iteration {0}'.format(i)
    ax.cla()
    # Prediction for the first training sample at recorded iteration i.
    ax.plot(np.array(predictions)[i, 0, :, 0].T, label='Pred')
    ax.plot(train_out[0, :], label='GT')
    ax.legend()
    ax.set_title(label)

# Animate every 4th recorded iteration and write the result as a video.
anim = FuncAnimation(fig, update, frames=range(0, 201, 4), interval=20)
anim.save('learning.mp4',fps=20)
plt.close()

from IPython.display import Video
Video("learning.mp4")
"""
Explanation: The GRU prediction seems to far better! Maybe, the RNNs suffer from the vanishing gradients problem?
Visualising estimations as model improves
Let's now write a simple function to visualise the estimations as a function of iterations. We'd expect the estimations to improve over time.
End of explanation
"""
# Knock out up to 50 random entries of the training targets to simulate
# missing values; each draw picks one random (row, column) position.
# NOTE(review): assumes train_out has shape (8000, 100) -- confirm upstream.
for num_unknown_values in range(50):
    train_out[np.random.choice(list(range(0, 8000))), np.random.choice(list(range(0, 100)))] = np.NAN

# Count the NaN entries (duplicate draws mean this can be fewer than 50).
np.isnan(train_out).sum()
"""
Explanation: This looks great! We can see how our model learns to produce reasonably good denoised signals over time. It doesn't start great though. Would a better initialisation help? I certainly feel that for this particular problem it would, as predicting the output the same as input is a good starting point!
Bonus: Handling missing values in denoised training data
The trick to handling missing values in the denoised training data (the quantity we wish to estimate) is to compute the loss only over the present values. This requires creating a mask for finding all entries except missing.
One such way to do so would be: mask = out > -1* 1e8 where out is the tensor containing missing values.
Let's first add some unknown values (np.NaN) in the training output data.
End of explanation
"""
# Small network (2 hidden units) to test training with missing target values.
r= CustomRNN(input_dim, 2, 1)
r
# Storing predictions per iterations to visualise later
predictions = []

optimizer = torch.optim.Adam(r.parameters(), lr=1e-2)
loss_func = nn.L1Loss()

for t in range(20):
    hidden = None
    inp = Variable(torch.Tensor(train_inp.reshape((train_inp.shape[0], -1, 1))), requires_grad=True)
    out = Variable(torch.Tensor(train_out.reshape((train_out.shape[0], -1, 1))) )
    pred = r(inp)
    optimizer.zero_grad()
    predictions.append(pred.data.numpy())
    # Create a mask to compute loss only on defined quantities.
    # NaN > -1e8 evaluates to False, so the missing entries are excluded.
    mask = out > -1* 1e8
    loss = loss_func(pred[mask], out[mask])
    if t%20==0:
        print(t, loss.data[0])
    loss.backward()
    optimizer.step()
"""
Explanation: Testing using a network with few parameters.
End of explanation
"""
|
ajhenrikson/phys202-2015-work | assignments/assignment05/InteractEx03.ipynb | mit | %matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
"""
Explanation: Interact Exercise 3
Imports
End of explanation
"""
def soliton(x, t, c, a):
    """Return phi(x, t) for a soliton wave with velocity c and initial location a.

    Works element-wise when x or t are NumPy arrays.
    """
    # phi = (c/2) * sech^2( sqrt(c)/2 * (x - c*t - a) )
    sech = 1.0 / np.cosh(0.5 * np.sqrt(c) * (x - c * t - a))
    return 0.5 * c * sech ** 2
assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5]))
"""
Explanation: Using interact for animation with data
A soliton is a constant velocity wave that maintains its shape as it propagates. They arise from non-linear wave equations, such has the Korteweg–de Vries equation, which has the following analytical solution:
$$
\phi(x,t) = \frac{1}{2} c \mathrm{sech}^2 \left[ \frac{\sqrt{c}}{2} \left(x - ct - a \right) \right]
$$
The constant c is the velocity and the constant a is the initial location of the soliton.
Define a soliton(x, t, c, a) function that computes the value of the soliton wave for the given arguments. Your function should work when the position x or the time t are NumPy arrays, in which case it should return a NumPy array itself.
End of explanation
"""
# Time grid: 100 samples on [0, 10].
tmin = 0.0
tmax = 10.0
tpoints = 100
t = np.linspace(tmin, tmax, tpoints)

# Space grid: 200 samples on [0, 10].
xmin = 0.0
xmax = 10.0
xpoints = 200
x = np.linspace(xmin, xmax, xpoints)

# Soliton constants: velocity c and initial location a.
c = 1.0
a = 0.0
"""
Explanation: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays:
End of explanation
"""
# Precompute phi[i, j] = phi(x[i], t[j]) on the space-time grid.
# BUG FIX: the original looped "for i in x" / "for j in t", which used the
# *float sample values* of x and t as array indices instead of the integer
# positions; loop over the index ranges instead.
phi = np.ones([xpoints, tpoints])
for i in range(xpoints):
    for j in range(tpoints):
        phi[i, j] = soliton(x[i], t[j], c, a)

assert phi.shape==(xpoints, tpoints)
assert phi.ndim==2
assert phi.dtype==np.dtype(float)
assert phi[0,0]==soliton(x[0],t[0],c,a)
"""
Explanation: Compute a 2d NumPy array called phi:
It should have a dtype of float.
It should have a shape of (xpoints, tpoints).
phi[i,j] should contain the value $\phi(x[i],t[j])$.
End of explanation
"""
def plot_soliton_data(i=0):
    """Plot the soliton wave phi(x, t[i]) versus position x.

    Parameters
    ----------
    i : int
        Index into the precomputed time grid ``t``.
    """
    # Pass x explicitly so the horizontal axis shows position: the original
    # call plotted phi against the sample index 0..len(x)-1, which
    # contradicted the 'x' axis label.
    plt.plot(x, soliton(x, t[i], c, a))
    plt.xlabel('x')
    # The vertical axis is the wave amplitude phi, not the time t[i].
    plt.ylabel('phi(x, t[i])')
    plt.ylim(0, .6)
    plt.title('phi(x,t[i])')

plot_soliton_data(0)

assert True # leave this for grading the plot_soliton_data function
"""
Explanation: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful.
End of explanation
"""
# Animate the soliton with a slider over the time index i (0..100, step 10).
interactive(plot_soliton_data,i=(0,100,10))
assert True # leave this for grading the interact with plot_soliton_data cell
"""
Explanation: Use interact to animate the plot_soliton_data function versus time.
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.13.0/examples/notebooks/generated/regression_diagnostics.ipynb | bsd-3-clause | %matplotlib inline
from statsmodels.compat import lzip
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
import matplotlib.pyplot as plt
# Load data
url = "https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/HistData/Guerry.csv"
dat = pd.read_csv(url)

# Fit regression model (using the natural log of one of the regressors).
# The formula API evaluates np.log(Pop1831) inside the formula string.
results = smf.ols("Lottery ~ Literacy + np.log(Pop1831)", data=dat).fit()

# Inspect the results
print(results.summary())
"""
Explanation: Regression diagnostics
This example file shows how to use a few of the statsmodels regression diagnostic tests in a real-life context. You can learn about more tests and find out more information about the tests here on the Regression Diagnostics page.
Note that most of the tests described here only return a tuple of numbers, without any annotation. A full description of outputs is always included in the docstring and in the online statsmodels documentation. For presentation purposes, we use the zip(name,test) construct to pretty-print short descriptions in the examples below.
Estimate a regression model
End of explanation
"""
name = ["Jarque-Bera", "Chi^2 two-tail prob.", "Skew", "Kurtosis"]
test = sms.jarque_bera(results.resid)
lzip(name, test)
"""
Explanation: Normality of the residuals
Jarque-Bera test:
End of explanation
"""
name = ["Chi^2", "Two-tail probability"]
test = sms.omni_normtest(results.resid)
lzip(name, test)
"""
Explanation: Omnibus test for normality:
End of explanation
"""
from statsmodels.stats.outliers_influence import OLSInfluence

# Per-observation influence diagnostics for the fitted model.
test_class = OLSInfluence(results)
# DFBETAS: change in each coefficient when a single observation is dropped.
test_class.dfbetas[:5, :]
"""
Explanation: Influence tests
Once created, an object of class OLSInfluence holds attributes and methods that allow users to assess the influence of each observation. For example, we can compute and extract the first few rows of DFbetas by:
End of explanation
"""
from statsmodels.graphics.regressionplots import plot_leverage_resid2

# Leverage vs. squared normalised residuals: highlights influential points.
fig, ax = plt.subplots(figsize=(8, 6))
fig = plot_leverage_resid2(results, ax=ax)
"""
Explanation: Explore other options by typing dir(test_class)
Useful information on leverage can also be plotted:
End of explanation
"""
# Condition number of the design matrix; large values suggest multicollinearity.
np.linalg.cond(results.model.exog)
"""
Explanation: Other plotting options can be found on the Graphics page.
Multicollinearity
Condition number:
End of explanation
"""
name = ["Lagrange multiplier statistic", "p-value", "f-value", "f p-value"]
test = sms.het_breuschpagan(results.resid, results.model.exog)
lzip(name, test)
"""
Explanation: Heteroskedasticity tests
Breusch-Pagan test:
End of explanation
"""
name = ["F statistic", "p-value"]
test = sms.het_goldfeldquandt(results.resid, results.model.exog)
lzip(name, test)
"""
Explanation: Goldfeld-Quandt test
End of explanation
"""
name = ["t value", "p value"]
test = sms.linear_harvey_collier(results)
lzip(name, test)
"""
Explanation: Linearity
Harvey-Collier multiplier test for Null hypothesis that the linear specification is correct:
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/preprocessing_layers.ipynb | apache-2.0 | !pip install -q sklearn
"""
Explanation: Classifying Structured Data using Keras Preprocessing Layers
Learning Objectives
Load a CSV file using Pandas.
Build an input pipeline to batch and shuffle the rows using tf.data.
Map from columns in the CSV to features used to train the model using Keras Preprocessing layers.
Build, train, and evaluate a model using Keras.
Introduction
In this notebook, you learn how to classify structured data (e.g. tabular data in a CSV). You will use Keras to define the model, and preprocessing layers as a bridge to map from columns in a CSV to features used to train the model.
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
Note: This tutorial is similar to Classify structured data with feature columns. This version uses new experimental Keras Preprocessing Layers instead of tf.feature_column. Keras Preprocessing Layers are more intuitive, and can be easily included inside your model to simplify deployment.
The Dataset
You will use a simplified version of the PetFinder dataset. There are several thousand rows in the CSV. Each row describes a pet, and each column describes an attribute. You will use this information to predict if the pet will be adopted.
Following is a description of this dataset. Notice there are both numeric and categorical columns. There is a free text column which you will not use in this tutorial.
Column | Description| Feature Type | Data Type
------------|--------------------|----------------------|-----------------
Type | Type of animal (Dog, Cat) | Categorical | string
Age | Age of the pet | Numerical | integer
Breed1 | Primary breed of the pet | Categorical | string
Color1 | Color 1 of pet | Categorical | string
Color2 | Color 2 of pet | Categorical | string
MaturitySize | Size at maturity | Categorical | string
FurLength | Fur length | Categorical | string
Vaccinated | Pet has been vaccinated | Categorical | string
Sterilized | Pet has been sterilized | Categorical | string
Health | Health Condition | Categorical | string
Fee | Adoption Fee | Numerical | integer
Description | Profile write-up for this pet | Text | string
PhotoAmt | Total uploaded photos for this pet | Numerical | integer
AdoptionSpeed | Speed of adoption | Classification | integer
Import TensorFlow and other libraries
End of explanation
"""
# import necessary libraries
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
# print the tensorflow version
tf.__version__
"""
Explanation: Restart the kernel before proceeding further (On the Notebook menu, select Kernel > Restart Kernel > Restart).
End of explanation
"""
import pathlib

dataset_url = 'http://storage.googleapis.com/download.tensorflow.org/data/petfinder-mini.zip'
csv_file = 'gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/petfinder-mini_toy.csv'
# Download and unpack the PetFinder archive into the local cache directory.
tf.keras.utils.get_file('petfinder_mini.zip', dataset_url,
                        extract=True, cache_dir='.')
# TODO
# read a comma-separated values (csv) file into DataFrame
dataframe = pd.read_csv(csv_file)
# get the first n rows
dataframe.head()
"""
Explanation: Use Pandas to create a dataframe
Pandas is a Python library with many helpful utilities for loading and working with structured data. You will use Pandas to download the dataset from a URL, and load it into a dataframe.
End of explanation
"""
# In the original dataset "4" indicates the pet was not adopted.
dataframe['target'] = np.where(dataframe['AdoptionSpeed']==4, 0, 1)
# Drop un-used columns.
dataframe = dataframe.drop(columns=['AdoptionSpeed', 'Description'])
"""
Explanation: Create target variable
The task in the Kaggle competition is to predict the speed at which a pet will be adopted (e.g., in the first week, the first month, the first three months, and so on). Let's simplify this for our tutorial. Here, you will transform this into a binary classification problem, and simply predict whether the pet was adopted, or not.
After modifying the label column, 0 will indicate the pet was not adopted, and 1 will indicate it was.
End of explanation
"""
# Two successive 80/20 splits: ~64% train, ~16% validation, 20% test.
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
"""
Explanation: Split the dataframe into train, validation, and test
The dataset you downloaded was a single CSV file. You will split this into train, validation, and test sets.
End of explanation
"""
# A utility method to create a tf.data dataset from a Pandas Dataframe
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
    """Convert a DataFrame into a batched (optionally shuffled) tf.data.Dataset.

    The 'target' column is separated out as the label; the remaining columns
    are yielded as a dict of feature tensors.
    """
    dataframe = dataframe.copy()  # avoid mutating the caller's frame via pop()
    labels = dataframe.pop('target')
    ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
    if shuffle:
        # Buffer covering the whole frame gives a full shuffle each epoch.
        ds = ds.shuffle(buffer_size=len(dataframe))
    ds = ds.batch(batch_size)
    ds = ds.prefetch(batch_size)
    return ds
"""
Explanation: Create an input pipeline using tf.data
Next, you will wrap the dataframes with tf.data, in order to shuffle and batch the data. If you were working with a very large CSV file (so large that it does not fit into memory), you would use tf.data to read it from disk directly. That is not covered in this tutorial.
End of explanation
"""
# Use a tiny batch so the printed structure stays readable.
batch_size = 5
# TODO
# call the necessary function with required parameters
train_ds = df_to_dataset(train, batch_size=batch_size)

# Pull one batch to inspect the (features-dict, labels) structure the pipeline yields.
[(train_features, label_batch)] = train_ds.take(1)
print('Every feature:', list(train_features.keys()))
print('A batch of ages:', train_features['Age'])
print('A batch of targets:', label_batch )
"""
Explanation: Now that you have created the input pipeline, let's call it to see the format of the data it returns. You have used a small batch size to keep the output readable.
End of explanation
"""
def get_normalization_layer(name, dataset):
    """Return a Normalization layer adapted to feature `name` of `dataset`.

    The returned layer standardises the feature to zero mean and unit
    variance using statistics learned from the training pipeline.
    """
    # Create a Normalization layer for our feature.
    normalizer = preprocessing.Normalization(axis=None)
    # TODO
    # Prepare a Dataset that only yields our feature.
    feature_ds = dataset.map(lambda x, y: x[name])
    # Learn the statistics of the data.
    normalizer.adapt(feature_ds)
    return normalizer

# Sanity check: normalise one raw batch of the PhotoAmt feature.
photo_count_col = train_features['PhotoAmt']
layer = get_normalization_layer('PhotoAmt', train_ds)
layer(photo_count_col)
"""
Explanation: You can see that the dataset returns a dictionary of column names (from the dataframe) that map to column values from rows in the dataframe.
Demonstrate the use of preprocessing layers.
The Keras preprocessing layers API allows you to build Keras-native input processing pipelines. You will use 3 preprocessing layers to demonstrate the feature preprocessing code.
Normalization - Feature-wise normalization of the data.
CategoryEncoding - Category encoding layer.
StringLookup - Maps strings from a vocabulary to integer indices.
IntegerLookup - Maps integers from a vocabulary to integer indices.
You can find a list of available preprocessing layers here.
Numeric columns
For each of the Numeric feature, you will use a Normalization() layer to make sure the mean of each feature is 0 and its standard deviation is 1.
get_normalization_layer function returns a layer which applies featurewise normalization to numerical features.
End of explanation
"""
def get_category_encoding_layer(name, dataset, dtype, max_tokens=None):
    """Return a callable that one-hot encodes the categorical feature `name`.

    Raw values are first mapped to integer indices by a StringLookup or
    IntegerLookup adapted on `dataset`, then one-hot encoded with
    CategoryEncoding. `max_tokens` caps the vocabulary size; rarer values
    fall into the OOV bucket.
    """
    # Create a StringLookup layer which will turn strings into integer indices
    if dtype == 'string':
        index = preprocessing.StringLookup(max_tokens=max_tokens)
    else:
        index = preprocessing.IntegerLookup(max_tokens=max_tokens)
    # TODO
    # Prepare a Dataset that only yields our feature
    feature_ds = dataset.map(lambda x, y: x[name])
    # Learn the set of possible values and assign them a fixed integer index.
    index.adapt(feature_ds)
    # One-hot encode the integer indices produced by the lookup layer.
    encoder = preprocessing.CategoryEncoding(num_tokens=index.vocabulary_size())
    # Apply one-hot encoding to our indices. The lambda function captures the
    # layer so we can use them, or include them in the functional model later.
    return lambda feature: encoder(index(feature))

# Sanity check on the string-typed 'Type' feature.
type_col = train_features['Type']
layer = get_category_encoding_layer('Type', train_ds, 'string')
layer(type_col)
"""
Explanation: Note: If you have many numeric features (hundreds, or more), it is more efficient to concatenate them first and use a single normalization layer.
Categorical columns
In this dataset, Type is represented as a string (e.g. 'Dog', or 'Cat'). You cannot feed strings directly to a model. The preprocessing layer takes care of representing strings as a one-hot vector.
get_category_encoding_layer function returns a layer which maps values from a vocabulary to integer indices and one-hot encodes the features.
End of explanation
"""
# One-hot encode the integer 'Age' column, capped at 5 tokens (rarer values
# collapse into the out-of-vocabulary bucket).
type_col = train_features['Age']
category_encoding_layer = get_category_encoding_layer('Age', train_ds,
                                                      'int64', 5)
category_encoding_layer(type_col)
"""
Explanation: Often, you don't want to feed a number directly into the model, but instead use a one-hot encoding of those inputs. Consider raw data that represents a pet's age.
End of explanation
"""
# Larger batches for actual training.
batch_size = 256
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)

# Parallel lists of Keras Input tensors and their encoded counterparts,
# assembled into the functional model below.
all_inputs = []
encoded_features = []

# Numeric features: standardised with an adapted Normalization layer.
for header in ['PhotoAmt', 'Fee']:
    numeric_col = tf.keras.Input(shape=(1,), name=header)
    normalization_layer = get_normalization_layer(header, train_ds)
    encoded_numeric_col = normalization_layer(numeric_col)
    all_inputs.append(numeric_col)
    encoded_numeric_col = encoded_numeric_col
    encoded_features.append(encoded_numeric_col)

# Categorical features encoded as integers.
age_col = tf.keras.Input(shape=(1,), name='Age', dtype='int64')
encoding_layer = get_category_encoding_layer('Age', train_ds, dtype='int64',
                                             max_tokens=5)
encoded_age_col = encoding_layer(age_col)
all_inputs.append(age_col)
encoded_features.append(encoded_age_col)

# Categorical features encoded as string.
categorical_cols = ['Type', 'Color1', 'Color2', 'Gender', 'MaturitySize',
                    'FurLength', 'Vaccinated', 'Sterilized', 'Health', 'Breed1']
for header in categorical_cols:
    categorical_col = tf.keras.Input(shape=(1,), name=header, dtype='string')
    encoding_layer = get_category_encoding_layer(header, train_ds, dtype='string',
                                                 max_tokens=5)
    encoded_categorical_col = encoding_layer(categorical_col)
    all_inputs.append(categorical_col)
    encoded_features.append(encoded_categorical_col)
"""
Explanation: Choose which columns to use
You have seen how to use several types of preprocessing layers. Now you will use them to train a model. You will be using Keras-functional API to build the model. The Keras functional API is a way to create models that are more flexible than the tf.keras.Sequential API.
The goal of this tutorial is to show you the complete code (e.g. mechanics) needed to work with preprocessing layers. A few columns have been selected arbitrarily to train our model.
Key point: If your aim is to build an accurate model, try a larger dataset of your own, and think carefully about which features are the most meaningful to include, and how they should be represented.
Earlier, you used a small batch size to demonstrate the input pipeline. Let's now create a new input pipeline with a larger batch size.
End of explanation
"""
# Concatenate all encoded features and attach a small MLP head.
all_features = tf.keras.layers.concatenate(encoded_features)
x = tf.keras.layers.Dense(32, activation="relu")(all_features)
x = tf.keras.layers.Dropout(0.5)(x)
output = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(all_inputs, output)

# TODO
# compile the model
# from_logits=True because the final Dense layer has no sigmoid activation.
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=["accuracy"])
"""
Explanation: Create, compile, and train the model
Now you can create our end-to-end model.
End of explanation
"""
# Render the model's connectivity graph (inputs -> encoders -> MLP head).
# rankdir='LR' is used to make the graph horizontal.
tf.keras.utils.plot_model(model, show_shapes=True, rankdir="LR")
"""
Explanation: Let's visualize our connectivity graph:
End of explanation
"""
# TODO
# train the model
model.fit(train_ds, epochs=10, validation_data=val_ds)

# Evaluate generalisation on the held-out test split.
loss, accuracy = model.evaluate(test_ds)
print("Accuracy", accuracy)
"""
Explanation: Train the model
End of explanation
"""
# Persist the full model (including the preprocessing layers) and reload it
# to verify the saved artifact is self-contained.
model.save('my_pet_classifier')
reloaded_model = tf.keras.models.load_model('my_pet_classifier')
"""
Explanation: Inference on new data
Key point: The model you have developed can now classify a row from a CSV file directly, because the preprocessing code is included inside the model itself.
You can now save and reload the Keras model. Follow the tutorial here for more information on TensorFlow models.
End of explanation
"""
# A single hypothetical pet to score.
sample = {
    'Type': 'Cat',
    'Age': 3,
    'Breed1': 'Tabby',
    'Gender': 'Male',
    'Color1': 'Black',
    'Color2': 'White',
    'MaturitySize': 'Small',
    'FurLength': 'Short',
    'Vaccinated': 'No',
    'Sterilized': 'No',
    'Health': 'Healthy',
    'Fee': 100,
    'PhotoAmt': 2,
}

# Wrap each scalar in a list to add a batch dimension, then convert to tensors.
input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()}
predictions = reloaded_model.predict(input_dict)
# The model outputs logits; squash to a probability.
prob = tf.nn.sigmoid(predictions[0])

print(
    "This particular pet had a %.1f percent probability "
    "of getting adopted." % (100 * prob)
)
"""
Explanation: To get a prediction for a new sample, you can simply call model.predict(). There are just two things you need to do:
Wrap scalars into a list so as to have a batch dimension (models only process batches of data, not single samples)
Call convert_to_tensor on each feature
End of explanation
"""
|
lcharleux/argiope | doc/notebooks/mesh/mesh_tutorial.ipynb | gpl-3.0 | %load_ext autoreload
%autoreload 2
import argiope as ag
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from string import Template
%matplotlib nbagg
"""
Explanation: Mesh tutorial
End of explanation
"""
geom_template = """
lc = $lc;
Point(1) = {0,0,0,lc};
Point(2) = {.5,0,0,lc};
Point(3) = {.5,.5,0,lc};
Point(4) = {1,.5,0,lc};
Point(5) = {1,1,0,lc};
Point(6) = {0,1,0,lc};
Point(7) = {.5,.25,0,lc};
Point(8) = {.5,.75,0,lc};
Point(9) = {.125,.875,0,lc};
Point(10) = {.125,.875-.05,0,lc};
Point(11) = {.125,.875+.05,0,lc};
Line(1) = {1,2};
Circle(2) = {2,3,4};
Line(3) = {4,5};
Line(4) = {5,6};
Line(5) = {6,1};
Circle(6) = {7,3,8};
Circle(7) = {8,3,7};
Circle(8) = {10,9,11};
Circle(9) = {11,9,10};
Line Loop(1) = {6,7}; // interior loop
Line Loop(2) = {1,2,3,4,5}; // exterior loop
Line Loop(3) = {8,9};// hole
Plane Surface(2) = {2,1,3}; // exterior surface (with a hole)
Recombine Surface {2};
Physical Surface("SURFACE") = {2};
"""
open("./_workdir/mesh.geo", "w").write(Template(geom_template).substitute(lc = 0.025))
"""
Explanation: Geometry setup
End of explanation
"""
!gmsh -2 ./_workdir/mesh.geo -algo 'delquad'
"""
Explanation: Mesh creation
GMSH can be run directly as a shell command.
End of explanation
"""
ag.utils.run_gmsh(gmsh_path = "gmsh",
gmsh_space = 2,
gmsh_options = "-algo 'del2d'",
name = "./_workdir/mesh.geo")
"""
Explanation: It is even simpler to use the dedicated function in argiope.utils.
End of explanation
"""
mesh = ag.mesh.read_msh("./_workdir/mesh.msh")
mesh.nodes.head()
mesh.elements.head()
"""
Explanation: Mesh reading
End of explanation
"""
def make_mesh(lc, algorithm = "del2d"):
    """
    Mesh the template geometry with gmsh and load the result back.

    Parameters
    ----------
    lc : float
        Characteristic element length substituted into the .geo template.
    algorithm : str
        gmsh 2D meshing algorithm (e.g. "frontal", "del2d", "delquad",
        "meshadapt").

    Returns
    -------
    The argiope mesh read back from the generated .msh file.
    """
    # Use a context manager so the .geo file is flushed and closed before
    # gmsh reads it (the previous bare open(...).write(...) leaked the file
    # handle and relied on garbage collection to flush the buffer).
    with open("./_workdir/mesh.geo", "w") as geo_file:
        geo_file.write(Template(geom_template).substitute(lc = lc))
    ag.utils.run_gmsh(gmsh_path = "gmsh",
                      gmsh_space = 2,
                      gmsh_options = "-algo '{0}'".format(algorithm),
                      name = "./_workdir/mesh.geo")
    mesh = ag.mesh.read_msh("./_workdir/mesh.msh")
    return mesh

mesh = make_mesh(0.02, "meshadapt")
mesh
fig = plt.figure()
algos = ["frontal", "del2d", "delquad", "meshadapt"]
# Mesh the same geometry with each algorithm and colour the elements by
# their aspect ratio so the algorithms can be compared visually.
for i in range(len(algos)):
    mesh = make_mesh(0.03, algos[i])
    patches = mesh.to_polycollection(edgecolor = "black", linewidth = .5, alpha = 1.)
    stats = mesh.stats()
    #patches.set_array( stats.stats.max_abs_angular_deviation )
    patches.set_array( stats.stats.aspect_ratio )
    patches.set_cmap(mpl.cm.terrain)
    ax = fig.add_subplot(2, 2, i+1)
    ax.set_aspect("equal")
    ax.set_xlim(mesh.nodes.coords.x.min(), mesh.nodes.coords.x.max())
    ax.set_ylim(mesh.nodes.coords.y.min(), mesh.nodes.coords.y.max())
    ax.add_collection(patches)
    cbar = plt.colorbar(patches, orientation = "vertical")
    # BUG FIX: the plotted quantity is the aspect ratio (see set_array above);
    # the old label still described the commented-out angular-deviation metric.
    cbar.set_label("Aspect ratio")
    ax.set_title(algos[i])
    #plt.xlabel("$x$")
    #plt.ylabel("$y$")
    #plt.grid()
    ax.axis('off')
plt.show()
"""
Explanation: Putting it all together
Let's now do all these operations in a single function and do fun stuff.
End of explanation
"""
# Summary statistics of the element-quality metrics of the last mesh.
stats = mesh.stats()
stats.stats.describe().loc[["min", "max", "mean", "std"]][["max_angular_deviation", "aspect_ratio"]]
"""
Explanation: Mesh quality investigation
For example, let's investigate the effect of the mesh algorithm on the overall quality of the mesh.
End of explanation
"""
|
pyro-ppl/numpyro | notebooks/source/bayesian_hierarchical_stacking.ipynb | apache-2.0 | !pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro
import os
from IPython.display import set_matplotlib_formats
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import BSpline
import seaborn as sns
import jax
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
plt.style.use("seaborn")
if "NUMPYRO_SPHINXBUILD" in os.environ:
set_matplotlib_formats("svg")
numpyro.set_host_device_count(4)
assert numpyro.__version__.startswith("0.9.2")
%matplotlib inline
"""
Explanation: Bayesian Hierarchical Stacking: Well Switching Case Study
<figure>
<img src="https://i.imgur.com/CiUmZKx.jpeg" width="700px"/>
<figcaption>Photo by Belinda Fewings, https://unsplash.com/photos/6p-KtXCBGNw.</figcaption>
</figure>
Table of Contents
Intro
1. Exploratory Data Analysis
2. Prepare 6 Different Models
2.1 Feature Engineering
2.2 Training
3. Bayesian Hierarchical Stacking
3.1 Prepare stacking datasets
3.2 Define stacking model
4. Evaluate on test set
4.1 Stack predictions
4.2 Compare methods
Conclusion
References
Intro <a class="anchor" id="1"></a>
Suppose you have just fit 6 models to a dataset, and need to choose which one to use to make predictions on your test set. How do you choose which one to use? A couple of common tactics are:
- choose the best model based on cross-validation;
- average the models, using weights based on cross-validation scores.
In the paper Bayesian hierarchical stacking: Some models are (somewhere) useful, a new technique is introduced: average models based on weights which are allowed to vary across according to the input data, based on a hierarchical structure.
Here, we'll implement the first case study from that paper - readers are nonetheless encouraged to look at the original paper to find other cases studies, as well as theoretical results. Code from the article (in R / Stan) can be found here.
End of explanation
"""
# Household-level survey data on well switching in Bangladesh.
wells = pd.read_csv(
    "http://stat.columbia.edu/~gelman/arm/examples/arsenic/wells.dat", sep=" "
)
wells.head()

fig, ax = plt.subplots(2, 2, figsize=(12, 6))
fig.suptitle("Target variable plotted against various predictors")
# Continuous predictors vs. the binary switch outcome.
sns.scatterplot(data=wells, x="arsenic", y="switch", ax=ax[0][0])
sns.scatterplot(data=wells, x="dist", y="switch", ax=ax[0][1])
# Proportion of switchers by community-association membership.
sns.barplot(
    data=wells.groupby("assoc")["switch"].mean().reset_index(),
    x="assoc",
    y="switch",
    ax=ax[1][0],
)
ax[1][0].set_ylabel("Proportion switch")
# Proportion of switchers by education level of the head of household.
sns.barplot(
    data=wells.groupby("educ")["switch"].mean().reset_index(),
    x="educ",
    y="switch",
    ax=ax[1][1],
)
ax[1][1].set_ylabel("Proportion switch");
"""
Explanation: 1. Exploratory Data Analysis <a class="anchor" id="1"></a>
The data we have to work with looks at households in Bangladesh, some of which were affected by high levels of arsenic in their water. Would affected households want to switch to a neighbour's well?
We'll split the data into a train and test set, and then we'll train six different models to try to predict whether households would switch wells. Then, we'll see how we can stack them when predicting on the test set!
But first, let's load it in and visualise it! Each row represents a household, and the features we have available to us are:
switch: whether a household switched to another well;
arsenic: level of arsenic in drinking water;
educ: level of education of "head of household";
dist100: distance to nearest safe-drinking well;
assoc: whether the household participates in any community activities.
End of explanation
"""
# Fix the RNG so the train/test membership is reproducible.
np.random.seed(1)
# 200 households for training; 1500 of the remaining households for testing.
train_id = wells.sample(n=200).index
test_id = wells.loc[~wells.index.isin(train_id)].sample(n=1500).index
y_train = wells.loc[train_id, "switch"].to_numpy()
y_test = wells.loc[test_id, "switch"].to_numpy()
"""
Explanation: Next, we'll choose 200 observations to be part of our train set, and 1500 to be part of our test set.
End of explanation
"""
wells["edu0"] = wells["educ"].isin(np.arange(0, 1)).astype(int)
wells["edu1"] = wells["educ"].isin(np.arange(1, 6)).astype(int)
wells["edu2"] = wells["educ"].isin(np.arange(6, 12)).astype(int)
wells["edu3"] = wells["educ"].isin(np.arange(12, 18)).astype(int)
wells["logarsenic"] = np.log(wells["arsenic"])
wells["assoc_half"] = wells["assoc"] / 2.0
wells["as_square"] = wells["logarsenic"] ** 2
wells["as_third"] = wells["logarsenic"] ** 3
wells["dist100"] = wells["dist"] / 100.0
wells["intercept"] = 1
def bs(x, knots, degree):
    """
    Build the B-spline basis matrix for a polynomial spline.

    Mirrors the behaviour of ``bs`` from R's splines package: the boundary
    knots are repeated ``degree + 1`` times (a clamped knot vector) and the
    first basis column (the implicit intercept) is dropped.

    Parameters
    ----------
    x
        Predictor variable (a pandas Series; its index is preserved).
    knots
        Locations of the internal breakpoints (not padded).
    degree
        Degree of the piecewise polynomial.

    Returns
    -------
    pd.DataFrame
        Spline basis matrix, one column per retained basis function.
    """
    deg = degree
    # Clamp the knot vector by repeating each boundary knot deg + 1 times.
    full_knots = np.concatenate(
        (np.repeat(x.min(), deg + 1), knots, np.repeat(x.max(), deg + 1))
    )
    n_bases = len(full_knots) - deg - 1
    # Evaluating BSpline with an identity coefficient matrix yields the
    # design matrix of all basis functions at the points in x.
    basis = BSpline(full_knots, np.identity(n_bases), deg)(x)
    # Drop the first column to match R's bs (implicit intercept).
    return pd.DataFrame(basis[:, 1:], index=x.index)
# Spline bases for log-arsenic and distance, with knots at the training-set
# deciles (0.1 .. 0.9 quantiles).
knots = np.quantile(wells.loc[train_id, "logarsenic"], np.linspace(0.1, 0.9, num=10))
spline_arsenic = bs(wells["logarsenic"], knots=knots, degree=3)
knots = np.quantile(wells.loc[train_id, "dist100"], np.linspace(0.1, 0.9, num=10))
spline_dist = bs(wells["dist100"], knots=knots, degree=3)

# Feature sets for the six candidate models (raw arsenic, log-arsenic,
# cubic arsenic, arsenic spline, distance spline, raw educ).
features_0 = ["intercept", "dist100", "arsenic", "assoc", "edu1", "edu2", "edu3"]
features_1 = ["intercept", "dist100", "logarsenic", "assoc", "edu1", "edu2", "edu3"]
features_2 = [
    "intercept",
    "dist100",
    "arsenic",
    "as_third",
    "as_square",
    "assoc",
    "edu1",
    "edu2",
    "edu3",
]
features_3 = ["intercept", "dist100", "assoc", "edu1", "edu2", "edu3"]
features_4 = ["intercept", "logarsenic", "assoc", "edu1", "edu2", "edu3"]
features_5 = ["intercept", "dist100", "logarsenic", "assoc", "educ"]

# Training design matrices, one per candidate model.
X0 = wells.loc[train_id, features_0].to_numpy()
X1 = wells.loc[train_id, features_1].to_numpy()
X2 = wells.loc[train_id, features_2].to_numpy()
X3 = (
    pd.concat([wells.loc[:, features_3], spline_arsenic], axis=1)
    .loc[train_id]
    .to_numpy()
)
X4 = pd.concat([wells.loc[:, features_4], spline_dist], axis=1).loc[train_id].to_numpy()
X5 = wells.loc[train_id, features_5].to_numpy()

# Matching test design matrices.
X0_test = wells.loc[test_id, features_0].to_numpy()
X1_test = wells.loc[test_id, features_1].to_numpy()
X2_test = wells.loc[test_id, features_2].to_numpy()
X3_test = (
    pd.concat([wells.loc[:, features_3], spline_arsenic], axis=1)
    .loc[test_id]
    .to_numpy()
)
X4_test = (
    pd.concat([wells.loc[:, features_4], spline_dist], axis=1).loc[test_id].to_numpy()
)
X5_test = wells.loc[test_id, features_5].to_numpy()

train_x_list = [X0, X1, X2, X3, X4, X5]
test_x_list = [X0_test, X1_test, X2_test, X3_test, X4_test, X5_test]
# K = number of candidate models.
K = len(train_x_list)
"""
Explanation: 2. Prepare 6 different candidate models <a class="anchor" id="2"></a>
2.1 Feature Engineering <a class="anchor" id="2.1"></a>
First, let's add a few new columns:
- edu0: whether educ is 0,
- edu1: whether educ is between 1 and 5,
- edu2: whether educ is between 6 and 11,
- edu3: whether educ is between 12 and 17,
- logarsenic: natural logarithm of arsenic,
- assoc_half: half of assoc,
- as_square: natural logarithm of arsenic, squared,
- as_third: natural logarithm of arsenic, cubed,
- dist100: dist divided by 100,
- intercept: just a columns of 1s.
We're going to start by fitting 6 different models to our train set:
logistic regression using intercept, arsenic, assoc, edu1, edu2, and edu3;
same as above, but with logarsenic instead of arsenic;
same as the first one, but with square and cubic features as well;
same as the first one, but with spline features derived from logarsenic as well;
same as the first one, but with spline features derived from dist100 as well;
same as the first one, but with educ instead of the binary edu variables.
End of explanation
"""
def logistic(x, y=None):
    """Bayesian logistic regression with a Normal(0, 3) prior on each weight.

    ``x`` is the (N, d) design matrix; ``y`` holds the binary outcomes and is
    omitted (None) when drawing posterior-predictive samples.  The sample-site
    names "beta", "logits" and "obs" are part of the model's interface and are
    kept unchanged.
    """
    num_coefs = x.shape[1]
    beta = numpyro.sample("beta", dist.Normal(0, 3).expand([num_coefs]))
    logits = numpyro.deterministic("logits", jnp.matmul(x, beta))
    numpyro.sample("obs", dist.Bernoulli(logits=logits), obs=y)
# Fit each candidate model with NUTS: 4 chains x 1000 draws after 1000 warmup
# iterations.  A per-model rng key is derived by folding the model index into
# a fixed seed, so the runs are reproducible and independent.
fit_list = []
for k in range(K):
    sampler = numpyro.infer.NUTS(logistic)
    mcmc = numpyro.infer.MCMC(
        sampler, num_chains=4, num_samples=1000, num_warmup=1000, progress_bar=False
    )
    rng_key = jax.random.fold_in(jax.random.PRNGKey(13), k)
    mcmc.run(rng_key, x=train_x_list[k], y=y_train)
    fit_list.append(mcmc)
"""
Explanation: 2.2 Training <a class="anchor" id="2.2"></a>
Each model will be trained in the same way - with a Bernoulli likelihood and a logit link function.
End of explanation
"""
def find_point_wise_loo_score(fit):
    """Return the pointwise LOO log-scores (``loo_i``) for a fitted MCMC run."""
    idata = az.from_numpyro(fit)
    loo_result = az.loo(idata, pointwise=True, scale="log")
    return loo_result.loo_i.values
# (N, K) matrix of pointwise LOO log-scores: one row per training point,
# one column per candidate model.
lpd_point = np.vstack([find_point_wise_loo_score(fit) for fit in fit_list]).T
# The same scores on the density (not log) scale.
exp_lpd_point = np.exp(lpd_point)
"""
Explanation: 2.3 Estimate leave-one-out cross-validated score for each training point <a class="anchor" id="2.3"></a>
Rather than refitting each model 100 times, we will estimate the leave-one-out cross-validated score using LOO.
End of explanation
"""
# Median-split encoding for the continuous features (Equation (16) of the
# paper): each feature f becomes f_l = min(f - median, 0) and
# f_r = max(f - median, 0), so stacking weights can respond differently
# below and above the training-set median.
dist100_median = wells.loc[wells.index[train_id], "dist100"].median()
logarsenic_median = wells.loc[wells.index[train_id], "logarsenic"].median()
wells["dist100_l"] = (wells["dist100"] - dist100_median).clip(upper=0)
wells["dist100_r"] = (wells["dist100"] - dist100_median).clip(lower=0)
wells["logarsenic_l"] = (wells["logarsenic"] - logarsenic_median).clip(upper=0)
wells["logarsenic_r"] = (wells["logarsenic"] - logarsenic_median).clip(lower=0)
# The four edu indicators come first; the stacking model treats that prefix
# as its discrete features via the d_discrete argument.
stacking_features = [
    "edu0",
    "edu1",
    "edu2",
    "edu3",
    "assoc_half",
    "dist100_l",
    "dist100_r",
    "logarsenic_l",
    "logarsenic_r",
]
X_stacking_train = wells.loc[train_id, stacking_features].to_numpy()
X_stacking_test = wells.loc[test_id, stacking_features].to_numpy()
"""
Explanation: 3. Bayesian Hierarchical Stacking <a class="anchor" id="3"></a>
3.1 Prepare stacking datasets <a class="anchor" id="3.1"></a>
To determine how the stacking weights should vary across training and test sets, we will need to create "stacking datasets" which include all the features which we want the stacking weights to depend on. How should such features be included? For discrete features, this is easy, we just one-hot-encode them. But for continuous features, we need a trick. In Equation (16), the authors recommend the following: if you have a continuous feature f, then replace it with the following two features:
f_l: f minus the median of f, clipped above at 0;
f_r: f minus the median of f, clipped below at 0;
End of explanation
"""
def stacking(
    X,
    d_discrete,
    X_test,
    exp_lpd_point,
    tau_mu,
    tau_sigma,
    *,
    test,
):
    """
    Get weights with which to stack candidate models' predictions.
    Parameters
    ----------
    X
        Training stacking matrix: features on which stacking weights should depend, for the
        training set.
    d_discrete
        Number of discrete features in `X` and `X_test`. The first `d_discrete` features
        from these matrices should be the discrete ones, with the continuous ones coming
        after them.
    X_test
        Test stacking matrix: features on which stacking weights should depend, for the
        testing set.
    exp_lpd_point
        LOO score evaluated at each point in the training set, for each candidate model.
    tau_mu
        Hyperprior for mean of `beta`, for discrete features.
    tau_sigma
        Hyperprior for standard deviation of `beta`, for continuous features.
    test
        Whether to calculate stacking weights for test set.
    Notes
    -----
    Naming of variables mirrors what's used in the original paper.
    """
    N = X.shape[0]
    d = X.shape[1]
    N_test = X_test.shape[0]
    K = lpd_point.shape[1] # number of candidate models
    # NOTE(review): the LOO log-scores are read from the module-level
    # ``lpd_point``; the ``exp_lpd_point`` argument is never referenced in
    # the body — confirm whether the global capture is intentional.
    with numpyro.plate("Candidate models", K - 1, dim=-2):
        # mean effect of discrete features on stacking weights
        mu = numpyro.sample("mu", dist.Normal(0, tau_mu))
        # standard deviation effect of discrete features on stacking weights
        sigma = numpyro.sample("sigma", dist.HalfNormal(scale=tau_sigma))
        with numpyro.plate("Discrete features", d_discrete, dim=-1):
            # effect of discrete features on stacking weights
            tau = numpyro.sample("tau", dist.Normal(0, 1))
        with numpyro.plate("Continuous features", d - d_discrete, dim=-1):
            # effect of continuous features on stacking weights
            beta_con = numpyro.sample("beta_con", dist.Normal(0, 1))
    # effects of features on stacking weights
    # (non-centred parametrisation: the standard-normal tau is scaled by
    # sigma and shifted by mu to give the discrete-feature coefficients)
    beta = numpyro.deterministic(
        "beta", jnp.hstack([(sigma.squeeze() * tau.T + mu.squeeze()).T, beta_con])
    )
    assert beta.shape == (K - 1, d)
    # stacking weights (in unconstrained space); the K-th model is the
    # reference, its unconstrained weight is pinned at 0
    f = jnp.hstack([X @ beta.T, jnp.zeros((N, 1))])
    assert f.shape == (N, K)
    # log probability of LOO training scores weighted by stacking weights.
    log_w = jax.nn.log_softmax(f, axis=1)
    # stacking weights (constrained to sum to 1)
    numpyro.deterministic("w", jnp.exp(log_w))
    logp = jax.nn.logsumexp(lpd_point + log_w, axis=1)
    numpyro.factor("logp", jnp.sum(logp))
    if test:
        # test set stacking weights (in unconstrained space)
        f_test = jnp.hstack([X_test @ beta.T, jnp.zeros((N_test, 1))])
        # test set stacking weights (constrained to sum to 1)
        w_test = numpyro.deterministic("w_test", jax.nn.softmax(f_test, axis=1))
# Fit the hierarchical stacking model.  d_discrete=4 because the four edu
# indicators sit at the front of the stacking matrices.
sampler = numpyro.infer.NUTS(stacking)
mcmc = numpyro.infer.MCMC(
    sampler, num_chains=4, num_samples=1000, num_warmup=1000, progress_bar=False
)
mcmc.run(
    jax.random.PRNGKey(17),
    X=X_stacking_train,
    d_discrete=4,
    X_test=X_stacking_test,
    exp_lpd_point=exp_lpd_point,
    tau_mu=1.0,
    tau_sigma=0.5,
    test=True,
)
trace = mcmc.get_samples()
"""
Explanation: 3.2 Define stacking model <a class="anchor" id="3.2"></a>
What we seek to find is a matrix of weights $W$ with which to multiply the models' predictions. Let's define a matrix $Pred$ such that $Pred_{i,k}$ represents the prediction made for point $i$ by model $k$. Then the final prediction for point $i$ will then be:
$$ \sum_k W_{i, k}Pred_{i,k} $$
Such a matrix $W$ would be required to have each row sum to $1$. Hence, we calculate each row $W_i$ of $W$ as:
$$ W_i = \text{softmax}(X_{\text{stacking},i} \cdot \beta), $$
where $\beta$ is a matrix whose values we seek to determine. For the discrete features, $\beta$ is given a hierarchical structure over the possible inputs. Continuous features, on the other hand, get no hierarchical structure in this case study and just vary according to the input values.
Notice how, for the discrete features, a non-centered parametrisation is used. Also note that we only need to estimate K-1 columns of $\beta$, because the weights W_{i, k} will have to sum to 1 for each i.
End of explanation
"""
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 6), sharey=True)
# Posterior-mean stacking weight of each model at every training point (N, K).
training_stacking_weights = trace["w"].mean(axis=0)
sns.scatterplot(data=pd.DataFrame(training_stacking_weights), ax=ax[0])
# Baseline: one global, input-independent weight per model from ArviZ's
# classical stacking, repeated per point so both panels share a format.
fixed_weights = (
    az.compare({idx: fit for idx, fit in enumerate(fit_list)}, method="stacking")
    .sort_index()["weight"]
    .to_numpy()
)
fixed_weights_df = pd.DataFrame(
    np.repeat(
        fixed_weights[jnp.newaxis, :],
        len(X_stacking_train),
        axis=0,
    )
)
sns.scatterplot(data=fixed_weights_df, ax=ax[1])
ax[0].set_title("Training weights from Bayesian Hierarchical stacking")
ax[1].set_title("Fixed weights stacking")
ax[0].set_xlabel("Index")
ax[1].set_xlabel("Index")
fig.suptitle(
    "Bayesian Hierarchical Stacking weights can vary according to the input",
    fontsize=18,
)
fig.tight_layout();
"""
Explanation: We can now extract the weights with which to weight the different models from the posterior, and then visualise how they vary across the training set.
Let's compare them with what the weights would've been if we'd just used fixed stacking weights (computed using ArviZ - see their docs for details).
End of explanation
"""
# for each candidate model, extract the posterior predictive logits
train_preds = []
for k in range(K):
    predictive = numpyro.infer.Predictive(logistic, fit_list[k].get_samples())
    rng_key = jax.random.fold_in(jax.random.PRNGKey(19), k)
    # average the logits over posterior draws, leaving one value per point
    train_pred = predictive(rng_key, x=train_x_list[k])["logits"]
    train_preds.append(train_pred.mean(axis=0))
# reshape, so we have (N, K)
train_preds = np.vstack(train_preds).T
# same as previous cell, but for test set
test_preds = []
for k in range(K):
    predictive = numpyro.infer.Predictive(logistic, fit_list[k].get_samples())
    rng_key = jax.random.fold_in(jax.random.PRNGKey(20), k)
    test_pred = predictive(rng_key, x=test_x_list[k])["logits"]
    test_preds.append(test_pred.mean(axis=0))
test_preds = np.vstack(test_preds).T
# get the stacking weights for the test set
test_stacking_weights = trace["w_test"].mean(axis=0)
# get predictions using the stacking weights
bhs_predictions = (test_stacking_weights * test_preds).sum(axis=1)
# get predictions using only the model with the best LOO score
model_selection_preds = test_preds[:, lpd_point.sum(axis=0).argmax()]
# get predictions using fixed stacking weights, dependent on the LOO score
fixed_weights_preds = (fixed_weights * test_preds).sum(axis=1)
"""
Explanation: 4. Evaluate on test set <a class="anchor" id="4"></a>
4.1 Stack predictions <a class="anchor" id="4.1"></a>
Now, for each model, let's evaluate the log predictive density for each point in the test set. Once we have predictions for each model, we need to think about how to combine them, such that for each test point, we get a single prediction.
We decided we'd do this in three ways:
- Bayesian Hierarchical Stacking (bhs_pred);
- choosing the model with the best training set LOO score (model_selection_preds);
- fixed-weights stacking (fixed_weights_preds).
End of explanation
"""
fig, ax = plt.subplots(figsize=(12, 6))
# One column of negative log predictive densities per combination method.
neg_log_pred_densities = np.vstack(
    [
        -dist.Bernoulli(logits=bhs_predictions).log_prob(y_test),
        -dist.Bernoulli(logits=model_selection_preds).log_prob(y_test),
        -dist.Bernoulli(logits=fixed_weights_preds).log_prob(y_test),
    ]
).T
neg_log_pred_density = pd.DataFrame(
    neg_log_pred_densities,
    columns=[
        "Bayesian Hierarchical Stacking",
        "Model selection",
        "Fixed stacking weights",
    ],
)
# Order the bars from worst (highest mean NLPD) to best before plotting.
sns.barplot(
    data=neg_log_pred_density.reindex(
        columns=neg_log_pred_density.mean(axis=0).sort_values(ascending=False).index
    ),
    orient="h",
    ax=ax,
)
ax.set_title(
    "Bayesian Hierarchical Stacking performs best here", fontdict={"fontsize": 18}
)
ax.set_xlabel("Negative mean log predictive density (lower is better)");
"""
Explanation: 4.2 Compare methods <a class="anchor" id="4.2"></a>
Let's compare the negative log predictive density scores on the test set (note - lower is better):
End of explanation
"""
|
sorig/shogun | doc/ipython-notebooks/classification/MKL.ipynb | bsd-3-clause | %pylab inline
%matplotlib inline
import os
# Location of the shogun example data; overridable via the environment.
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# import all shogun classes
from shogun import *
"""
Explanation: Multiple Kernel Learning
By Saurabh Mahindre - <a href="https://github.com/Saurabh7">github.com/Saurabh7</a>
This notebook is about multiple kernel learning in shogun. We will see how to construct a combined kernel, determine optimal kernel weights using MKL and use it for different types of classification and novelty detection.
Introduction
Mathematical formulation
Using a Combined kernel
Example: Toy Data
Generating Kernel weights
Binary classification using MKL
MKL for knowledge discovery
Multiclass classification using MKL
One-class classification using MKL
End of explanation
"""
kernel = CombinedKernel()
"""
Explanation: Introduction
<em>Multiple kernel learning</em> (MKL) is about using a combined kernel i.e. a kernel consisting of a linear combination of arbitrary kernels over different domains. The coefficients or weights of the linear combination can be learned as well.
Kernel based methods such as support vector machines (SVMs) employ a so-called kernel function $k(x_{i},x_{j})$ which intuitively computes the similarity between two examples $x_{i}$ and $x_{j}$. </br>
Selecting the kernel function
$k()$ and it's parameters is an important issue in training. Kernels designed by humans usually capture one aspect of data. Choosing one kernel means to select exactly one such aspect. Which means combining such aspects is often better than selecting.
In shogun the CMKL is the base class for MKL. We can do classifications: binary, one-class, multiclass and regression too: regression.
Mathematical formulation (skip if you just want code examples)
</br>In a SVM, defined as:
$$f({\bf x})=\text{sign} \left(\sum_{i=0}^{N-1} \alpha_i k({\bf x}, {\bf x_i})+b\right)$$</br>
where ${\bf x_i},{i = 1,...,N}$ are labeled training examples ($y_i \in {±1}$).
One could make a combination of kernels like:
$${\bf k}(x_i,x_j)=\sum_{k=0}^{K} \beta_k {\bf k_k}(x_i, x_j)$$
where $\beta_k > 0$ and $\sum_{k=0}^{K} \beta_k = 1$
In the multiple kernel learning problem for binary classification one is given $N$ data points ($x_i, y_i$ )
($y_i \in {±1}$), where $x_i$ is translated via $K$ mappings $\phi_k(x) \rightarrow R^{D_k} $, $k=1,...,K$ , from the input into $K$ feature spaces $(\phi_1(x_i),...,\phi_K(x_i))$ where $D_k$ denotes dimensionality of the $k$-th feature space.
In MKL $\alpha_i$,$\beta$ and bias are determined by solving the following optimization program. For details see [1].
$$\mbox{min} \hspace{4mm} \gamma-\sum_{i=1}^N\alpha_i$$
$$ \mbox{w.r.t.} \hspace{4mm} \gamma\in R, \alpha\in R^N \nonumber$$
$$\mbox {s.t.} \hspace{4mm} {\bf 0}\leq\alpha\leq{\bf 1}C,\;\;\sum_{i=1}^N \alpha_i y_i=0 \nonumber$$
$$ {\frac{1}{2}\sum_{i,j=1}^N \alpha_i \alpha_j y_i y_j {\bf k_k}(x_i, x_j) \leq \gamma}, \forall k=1,\ldots,K\nonumber\
$$
Here C is a pre-specified regularization parameter.
Within shogun this optimization problem is solved using semi-infinite programming. For 1-norm MKL one of the two approaches described in [1] is used.
The first approach (also called the wrapper algorithm) wraps around a single kernel SVMs, alternatingly solving for $\alpha$ and $\beta$. It is using a traditional SVM to generate new violated constraints and thus requires a single kernel SVM and any of the SVMs contained in shogun can be used. In the MKL step either a linear program is solved via glpk or cplex or analytically or a newton (for norms>1) step is performed.
The second much faster but also more memory demanding approach performing interleaved optimization, is integrated into the chunking-based SVMlight.
Using a Combined kernel
Shogun provides an easy way to make combination of kernels using the CombinedKernel class, to which we can append any kernel from the many options shogun provides. It is especially useful to combine kernels working on different domains and to combine kernels looking at independent features and requires CombinedFeatures to be used. Similarly the CombinedFeatures is used to combine a number of feature objects into a single CombinedFeatures object
End of explanation
"""
# Sample a 4-component 2-D Gaussian mixture: two components per class, so the
# classes interleave on the grid and are not trivially separable.
num=30;
num_components=4
means=zeros((num_components, 2))
means[0]=[-1,1]
means[1]=[2,-1.5]
means[2]=[-1,-3]
means[3]=[2,1]
covs=array([[1.0,0.0],[0.0,1.0]])
gmm=GMM(num_components)
# Configure all components (list comprehensions used purely for side effects).
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
[gmm.set_nth_cov(covs,i) for i in range(num_components)]
# Draw train/test samples from one component at a time by putting all of the
# mixture weight on that component.
gmm.set_coef(array([1.0,0.0,0.0,0.0]))
xntr=array([gmm.sample() for i in range(num)]).T
xnte=array([gmm.sample() for i in range(5000)]).T
gmm.set_coef(array([0.0,1.0,0.0,0.0]))
xntr1=array([gmm.sample() for i in range(num)]).T
xnte1=array([gmm.sample() for i in range(5000)]).T
gmm.set_coef(array([0.0,0.0,1.0,0.0]))
xptr=array([gmm.sample() for i in range(num)]).T
xpte=array([gmm.sample() for i in range(5000)]).T
gmm.set_coef(array([0.0,0.0,0.0,1.0]))
xptr1=array([gmm.sample() for i in range(num)]).T
xpte1=array([gmm.sample() for i in range(5000)]).T
# First two components form the negative class, last two the positive class.
traindata=concatenate((xntr,xntr1,xptr,xptr1), axis=1)
trainlab=concatenate((-ones(2*num), ones(2*num)))
testdata=concatenate((xnte,xnte1,xpte,xpte1), axis=1)
testlab=concatenate((-ones(10000), ones(10000)))
#convert to shogun features and generate labels for data
feats_train=features(traindata)
labels=BinaryLabels(trainlab)
_=jet()
figure(figsize(18,5))
subplot(121)
# plot train data
_=scatter(traindata[0,:], traindata[1,:], c=trainlab, s=100)
title('Toy data for classification')
axis('equal')
colors=["blue","blue","red","red"]
# a tool for visualisation
from matplotlib.patches import Ellipse
def get_gaussian_ellipse_artist(mean, cov, nstd=1.96, color="red", linewidth=3):
    """Return an unfilled matplotlib Ellipse patch for a 2-D Gaussian.

    The ellipse is centred at ``mean`` with axes along the eigenvectors of
    ``cov`` and half-axis lengths of ``nstd`` standard deviations (1.96 by
    default, i.e. roughly a 95% contour).
    """
    eigvals, eigvecs = eigh(cov)
    descending = eigvals.argsort()[::-1]
    eigvals = eigvals[descending]
    eigvecs = eigvecs[:, descending]
    # Orientation of the principal axis, in degrees.
    angle = numpy.degrees(arctan2(*eigvecs[:, 0][::-1]))
    width, height = 2 * nstd * sqrt(eigvals)
    return Ellipse(xy=mean, width=width, height=height, angle=angle,
                   edgecolor=color, fill=False, linewidth=linewidth)
# Overlay the confidence ellipse of each mixture component on the scatter.
for i in range(num_components):
    gca().add_artist(get_gaussian_ellipse_artist(means[i], covs, color=colors[i]))
"""
Explanation: Prediction on toy data
In order to see the prediction capabilities, let us generate some data using the GMM class. The data is sampled by setting means (GMM notebook) such that it sufficiently covers X-Y grid and is not too easy to classify.
End of explanation
"""
# Two Gaussian sub-kernels with very different widths: a narrow one (0.5)
# that tends to overfit and a wide one (25) that tends to underfit.
width0=0.5
kernel0=GaussianKernel(feats_train, feats_train, width0)
width1=25
kernel1=GaussianKernel(feats_train, feats_train, width1)
#combine kernels
kernel.append_kernel(kernel0)
kernel.append_kernel(kernel1)
kernel.init(feats_train, feats_train)
mkl = MKLClassification()
#set the norm, weights sum to 1.
mkl.set_mkl_norm(1)
mkl.set_C(1, 1)
mkl.set_kernel(kernel)
mkl.set_labels(labels)
#train to get weights
mkl.train()
# Learned sub-kernel weights (the betas of the linear combination).
w=kernel.get_subkernel_weights()
print(w)
"""
Explanation: Generating Kernel weights
Just to help us visualize let's use two gaussian kernels (CGaussianKernel) with considerably different widths. As required in MKL, we need to append them to the Combined kernel. To generate the optimal weights (i.e $\beta$s in the above equation), training of MKL is required. This generates the weights as seen in this example.
End of explanation
"""
size=100
x1=linspace(-5, 5, size)
x2=linspace(-5, 5, size)
x, y=meshgrid(x1, x2)
#Generate X-Y grid test data
grid=features(array((ravel(x), ravel(y))))
# Test-side kernels are computed between the training features and the grid
# points, appended in the same order as during training.
kernel0t=GaussianKernel(feats_train, grid, width0)
kernel1t=GaussianKernel(feats_train, grid, width1)
kernelt=CombinedKernel()
kernelt.append_kernel(kernel0t)
kernelt.append_kernel(kernel1t)
#initialize with test grid
kernelt.init(feats_train, grid)
mkl.set_kernel(kernelt)
#prediction
grid_out=mkl.apply()
# Real-valued classifier outputs, reshaped onto the 2-D grid for plotting.
z=grid_out.get_values().reshape((size, size))
figure(figsize=(10,5))
title("Classification using MKL")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
"""
Explanation: Binary classification using MKL
Now with the data ready and training done, we can do the binary classification. The weights generated can be intuitively understood. We will see that on plotting individual subkernels outputs and outputs of the MKL classification. To apply on test features, we need to reinitialize the kernel with kernel.init and pass the test features. After that it's just a matter of doing mkl.apply to generate outputs.
End of explanation
"""
# Hard labels of the combined-kernel classifier on the grid.
z=grid_out.get_labels().reshape((size, size))
# MKL
figure(figsize=(20,5))
subplot(131, title="Multiple Kernels combined")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
# Re-train using only the narrow kernel (still wrapped in a CombinedKernel
# so the same MKL machinery can be reused), then predict on the grid.
comb_ker0=CombinedKernel()
comb_ker0.append_kernel(kernel0)
comb_ker0.init(feats_train, feats_train)
mkl.set_kernel(comb_ker0)
mkl.train()
comb_ker0t=CombinedKernel()
comb_ker0t.append_kernel(kernel0)
comb_ker0t.init(feats_train, grid)
mkl.set_kernel(comb_ker0t)
out0=mkl.apply()
# subkernel 1
z=out0.get_labels().reshape((size, size))
subplot(132, title="Kernel 1")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
# Same procedure with only the wide kernel.
comb_ker1=CombinedKernel()
comb_ker1.append_kernel(kernel1)
comb_ker1.init(feats_train, feats_train)
mkl.set_kernel(comb_ker1)
mkl.train()
comb_ker1t=CombinedKernel()
comb_ker1t.append_kernel(kernel1)
comb_ker1t.init(feats_train, grid)
mkl.set_kernel(comb_ker1t)
out1=mkl.apply()
# subkernel 2
z=out1.get_labels().reshape((size, size))
subplot(133, title="kernel 2")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
"""
Explanation: To justify the weights, let's train and compare two subkernels with the MKL classification output. Training MKL classifier with a single kernel appended to a combined kernel makes no sense and is just like normal single kernel based classification, but let's do it for comparison.
End of explanation
"""
# Compare held-out error rates: combined kernel vs each sub-kernel alone.
kernelt.init(feats_train, features(testdata))
mkl.set_kernel(kernelt)
out=mkl.apply()
evaluator=ErrorRateMeasure()
print("Test error is %2.2f%% :MKL" % (100*evaluator.evaluate(out,BinaryLabels(testlab))))
comb_ker0t.init(feats_train,features(testdata))
mkl.set_kernel(comb_ker0t)
out=mkl.apply()
evaluator=ErrorRateMeasure()
print("Test error is %2.2f%% :Subkernel1"% (100*evaluator.evaluate(out,BinaryLabels(testlab))))
comb_ker1t.init(feats_train, features(testdata))
mkl.set_kernel(comb_ker1t)
out=mkl.apply()
evaluator=ErrorRateMeasure()
print("Test error is %2.2f%% :subkernel2" % (100*evaluator.evaluate(out,BinaryLabels(testlab))))
"""
Explanation: As we can see the multiple kernel output seems just about right. Kernel 1 gives a sort of overfitting output while the kernel 2 seems not so accurate. The kernel weights are hence so adjusted to get a refined output. We can have a look at the errors by these subkernels to have more food for thought. Most of the time, the MKL error is lesser as it incorporates aspects of both kernels. One of them is strict while other is lenient, MKL finds a balance between those.
End of explanation
"""
def circle(x, radius, neg):
    """Return the point [x, y] on the origin-centred circle of the given radius.

    The positive root is used unless ``neg`` is true, in which case the point
    [x, -y] on the lower semicircle is returned instead.
    """
    y = sqrt(square(radius) - square(x))
    return [x, -y] if neg else [x, y]
def get_circle(radius):
    """Sample 200 points on an origin-centred circle of the given radius.

    Returns a (2, 200) array: 100 points on the lower semicircle followed by
    100 on the upper one, each sampled at evenly spaced x coordinates.
    """
    xs = linspace(-radius, radius, 100)
    lower = array([circle(val, radius, True) for val in xs]).T
    upper = array([circle(val, radius, False) for val in xs]).T
    return concatenate((lower, upper), axis=1)
def get_data(r1, r2):
    """Stack two concentric circles of radii ``r1`` and ``r2`` into one
    (2, 400) point cloud and wrap it in shogun features.

    Returns the tuple (raw_points, shogun_features).
    """
    points = concatenate((get_circle(r1), get_circle(r2)), axis=1)
    return points, features(points)
# 200 points per circle: the inner circle is the negative class, the outer
# one the positive class.
l=concatenate((-ones(200),ones(200)))
lab=BinaryLabels(l)
#get two circles with radius 2 and 4
c, feats_tr=get_data(2,4)
c1, feats_tr1=get_data(2,3)
_=gray()
figure(figsize=(10,5))
subplot(121)
title("Circles with different separation")
p=scatter(c[0,:], c[1,:], c=lab)
subplot(122)
q=scatter(c1[0,:], c1[1,:], c=lab)
"""
Explanation: MKL for knowledge discovery
MKL can recover information about the problem at hand. Let us see this with a binary classification problem. The task is to separate two concentric classes shaped like circles. By varying the distance between the boundary of the circles we can control the separability of the problem. Starting with an almost non-separable scenario, the data quickly becomes separable as the distance between the circles increases.
End of explanation
"""
def train_mkl(circles, feats_tr):
    """Train a 1-norm MKL binary classifier on four Gaussian kernels of
    widths 1, 5, 7 and 10, and return (subkernel_weights, trained_mkl).

    Reads the module-level labels ``lab``.  ``circles`` is not used by the
    training itself and is kept for interface compatibility.
    """
    # Four kernels with different widths, combined into one MKL kernel.
    kernel = CombinedKernel()
    for width in (1, 5, 7, 10):
        kernel.append_kernel(GaussianKernel(feats_tr, feats_tr, width))
    kernel.init(feats_tr, feats_tr)

    mkl = MKLClassification()
    mkl.set_mkl_norm(1)  # L1 norm: learned weights sum to 1
    mkl.set_C(1, 1)
    mkl.set_kernel(kernel)
    mkl.set_labels(lab)
    mkl.train()

    return kernel.get_subkernel_weights(), mkl
def test_mkl(mkl, grid):
    """Evaluate a trained MKL classifier on ``grid``.

    Rebuilds the four-width Gaussian combined kernel between the module-level
    training features ``feats_tr`` and the grid points, then applies the
    classifier and returns its output labels object.
    """
    kernelt = CombinedKernel()
    for width in (1, 5, 7, 10):
        kernelt.append_kernel(GaussianKernel(feats_tr, grid, width))
    kernelt.init(feats_tr, grid)

    mkl.set_kernel(kernelt)
    return mkl.apply()
size=50
x1=linspace(-10, 10, size)
x2=linspace(-10, 10, size)
x, y=meshgrid(x1, x2)
grid=features(array((ravel(x), ravel(y))))
# Train on the radius-2/radius-4 circle pair and report the learned weights.
w, mkl=train_mkl(c, feats_tr)
print(w)
out=test_mkl(mkl,grid)
# Real-valued outputs reshaped onto the grid for the contour plot.
z=out.get_values().reshape((size, size))
figure(figsize=(5,5))
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
title('classification with constant separation')
_=colorbar(c)
"""
Explanation: These are the type of circles we want to distinguish between. We can try classification with a constant separation between the circles first.
End of explanation
"""
# Sweep the outer-circle radius (inner radius fixed at 4, so the gap runs
# from 1.5 to 3.5) and record how the learned kernel weights respond.
range1=linspace(5.5,7.5,50)
x=linspace(1.5,3.5,50)
temp=[]
for i in range1:
    #vary separation between circles
    c, feats=get_data(4,i)
    w, mkl=train_mkl(c, feats)
    temp.append(w)
# One row per kernel width, one column per separation value.
y=array([temp[i] for i in range(0,50)]).T
figure(figsize=(20,5))
_=plot(x, y[0,:], color='k', linewidth=2)
_=plot(x, y[1,:], color='r', linewidth=2)
_=plot(x, y[2,:], color='g', linewidth=2)
_=plot(x, y[3,:], color='y', linewidth=2)
title("Comparison between kernel widths and weights")
ylabel("Weight")
xlabel("Distance between circles")
_=legend(["1","5","7","10"])
"""
Explanation: As we can see the MKL classifier classifies them as expected. Now let's vary the separation and see how it affects the weights.The choice of the kernel width of the Gaussian kernel used for classification is expected to depend on the separation distance of the learning problem. An increased distance between the circles will correspond to a larger optimal kernel width. This effect should be visible in the results of the MKL, where we used MKL-SVMs with four kernels with different widths (1,5,7,10).
End of explanation
"""
from scipy.io import loadmat, savemat
from os import path, sep
# USPS handwritten digits: 256-dimensional (16x16) images, labels 1..10.
mat = loadmat(sep.join(['..','..','..','data','multiclass', 'usps.mat']))
Xall = mat['data']
Yall = array(mat['label'].squeeze(), dtype=double)
# map from 1..10 to 0..9, since shogun
# requires multiclass labels to be
# 0, 1, ..., K-1
Yall = Yall - 1
# Fixed seed so the train/test split is reproducible.
random.seed(0)
subset = random.permutation(len(Yall))
#get first 1000 examples
Xtrain = Xall[:, subset[:1000]]
Ytrain = Yall[subset[:1000]]
Nsplit = 2
all_ks = range(1, 21)
print(Xall.shape)
print(Xtrain.shape)
"""
Explanation: In the above plot we see the kernel weightings obtained for the four kernels. Every line shows one weighting. The courses of the kernel weightings reflect the development of the learning problem: as long as the problem is difficult the best separation can be obtained when using the kernel with smallest width. The low width kernel loses importance when the distance between the circles increases and larger kernel widths obtain a larger weight in MKL. Increasing the distance between the circles, kernels with greater widths are used.
Multiclass classification using MKL
MKL can be used for multiclass classification using the MKLMulticlass class. It is based on the GMNPSVM Multiclass SVM. Its termination criterion is set by set_mkl_epsilon(float64_t eps ) and the maximal number of MKL iterations is set by set_max_num_mkliters(int32_t maxnum). The epsilon termination criterion is the L2 norm between the current MKL weights and their counterpart from the previous iteration. We set it to 0.001 as we want pretty accurate weights.
To see this in action let us compare it to the normal GMNPSVM example as in the KNN notebook, just to see how MKL fares in object recognition. We use the USPS digit recognition dataset.
End of explanation
"""
def plot_example(dat, lab):
    """Show the first five 16x16 digit images stored column-wise in ``dat``,
    using the corresponding entries of ``lab`` as integer titles."""
    for idx in range(5):
        axes = subplot(1, 5, idx + 1)
        title(int(lab[idx]))
        axes.imshow(dat[:, idx].reshape((16, 16)), interpolation='nearest')
        axes.set_xticks([])
        axes.set_yticks([])
# Preview the first five training digits in grayscale.
_=figure(figsize=(17,6))
gray()
plot_example(Xtrain, Ytrain)
"""
Explanation: Let's plot five of the examples to get a feel of the dataset.
End of explanation
"""
# MKL training and output
labels = MulticlassLabels(Ytrain)
feats = features(Xtrain)
#get test data from 5500 onwards
Xrem=Xall[:,subset[5500:]]
Yrem=Yall[subset[5500:]]
#test features not used in training
feats_rem=features(Xrem)
labels_rem=MulticlassLabels(Yrem)
# One feature object per sub-kernel: CombinedKernel requires matching
# CombinedFeatures on both the train and test side.
kernel = CombinedKernel()
feats_train = CombinedFeatures()
feats_test = CombinedFeatures()
#append gaussian kernel
subkernel = GaussianKernel(10,15)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_rem)
kernel.append_kernel(subkernel)
#append PolyKernel
feats = features(Xtrain)
subkernel = PolyKernel(10,2)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_rem)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
# MKLMulticlass(C, kernel, labels); epsilon terminates the inner SVM solve,
# mkl_epsilon the outer weight optimisation (L2 change between iterations).
mkl = MKLMulticlass(1.2, kernel, labels)
mkl.set_epsilon(1e-2)
mkl.set_mkl_epsilon(0.001)
mkl.set_mkl_norm(1)
mkl.train()
#initialize with test features
kernel.init(feats_train, feats_test)
out = mkl.apply()
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
# Show a few of the misclassified digits.
idx=where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=figure(figsize=(17,6))
gray()
plot_example(Xbad, Ybad)
# Learned sub-kernel weights for the Gaussian/Poly combination.
w=kernel.get_subkernel_weights()
print(w)
# Single kernel:PolyKernel
C=1
pk=PolyKernel(10,2)
svm=GMNPSVM(C, pk, labels)
_=svm.train(feats)
out=svm.apply(feats_rem)
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=figure(figsize=(17,6))
gray()
plot_example(Xbad, Ybad)
#Single Kernel:Gaussian kernel
width=15
C=1
gk=GaussianKernel()
gk.set_width(width)
svm=GMNPSVM(C, gk, labels)
_=svm.train(feats)
out=svm.apply(feats_rem)
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=figure(figsize=(17,6))
gray()
plot_example(Xbad, Ybad)
"""
Explanation: We combine a Gaussian kernel and a PolyKernel. To test, examples not included in training data are used.
This is just a demonstration but we can see here how MKL is working behind the scene. What we have is two kernels with significantly different properties. The gaussian kernel defines a function space that is a lot larger than that of the linear kernel or the polynomial kernel. The gaussian kernel has a low width, so it will be able to represent more and more complex relationships between the training data. But it requires enough data to train on. The number of training examples here is 1000, which seems a bit less as total examples are 10000. We hope the polynomial kernel can counter this problem, since it will fit the polynomial for you using a lot less data than the squared exponential. The kernel weights are printed below to add some insight.
End of explanation
"""
# Toy data for novelty detection: two tight clusters around (2, 2) and
# (-2, -2); column-major (2, N) layout as required by shogun features.
X = -0.3 * random.randn(100,2)
traindata=r_[X + 2, X - 2].T
X = -0.3 * random.randn(20, 2)
testdata = r_[X + 2, X - 2].T
# NOTE(review): traindata has 200 columns but only 100 labels are created
# here; one-class training presumably ignores the label values — confirm.
trainlab=concatenate((ones(99),-ones(1)))
#convert to shogun features and generate labels for data
feats=features(traindata)
labels=BinaryLabels(trainlab)
xx, yy = meshgrid(linspace(-5, 5, 500), linspace(-5, 5, 500))
grid=features(array((ravel(xx), ravel(yy))))
#test features
feats_t=features(testdata)
# Uniformly scattered points used as "abnormal" probes.
x_out=(random.uniform(low=-4, high=4, size=(20, 2))).T
feats_out=features(x_out)
# Combined kernel over two sub-kernels; a CombinedFeatures object is needed
# for every dataset we later evaluate on (train, test, outliers, grid).
kernel=CombinedKernel()
feats_train=CombinedFeatures()
feats_test=CombinedFeatures()
feats_test_out=CombinedFeatures()
feats_grid=CombinedFeatures()
#append gaussian kernel
subkernel=GaussianKernel(10,8)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_t)
feats_test_out.append_feature_obj(feats_out)
feats_grid.append_feature_obj(grid)
kernel.append_kernel(subkernel)
#append PolyKernel
feats = features(traindata)
subkernel = PolyKernel(10,3)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_t)
feats_test_out.append_feature_obj(feats_out)
feats_grid.append_feature_obj(grid)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
mkl = MKLOneClass()
mkl.set_kernel(kernel)
mkl.set_labels(labels)
# Use the slower wrapper algorithm rather than interleaved optimisation.
mkl.set_interleaved_optimization_enabled(False)
mkl.set_epsilon(1e-2)
mkl.put('mkl_epsilon', 0.1)
mkl.set_mkl_norm(1)
"""
Explanation: The misclassified examples are surely pretty tough to predict. As seen from the accuracy MKL seems to work a shade better in the case. One could try this out with more and different types of kernels too.
One-class classification using MKL
One-class classification can be done using MKL in shogun. This is demonstrated in the following simple example using CMKLOneClass. We will see how abnormal data is detected. This is also known as novelty detection. Below we generate some toy data and initialize combined kernels and features.
End of explanation
"""
# Train the one-class MKL, then score the normal test points, the abnormal
# probes and the full X-Y grid by re-initialising the kernel for each set.
mkl.train()
print("Weights:")
w=kernel.get_subkernel_weights()
print(w)
#initialize with test features
kernel.init(feats_train, feats_test)
normal_out = mkl.apply()
#test on abnormally generated data
kernel.init(feats_train, feats_test_out)
abnormal_out = mkl.apply()
#test on X-Y grid
kernel.init(feats_train, feats_grid)
grid_out=mkl.apply()
z=grid_out.get_values().reshape((500,500))
z_lab=grid_out.get_labels().reshape((500,500))
a=abnormal_out.get_labels()
n=normal_out.get_labels()
#check for normal and abnormal classified data
idx=where(normal_out.get_labels() != 1)[0]
abnormal=testdata[:,idx]
idx=where(normal_out.get_labels() == 1)[0]
normal=testdata[:,idx]
figure(figsize(15,6))
pl =subplot(121)
title("One-class classification using MKL")
_=pink()
c=pcolor(xx, yy, z)
_=contour(xx, yy, z_lab, linewidths=1, colors='black', hold=True)
_=colorbar(c)
p1=pl.scatter(traindata[0, :], traindata[1,:], cmap=gray(), s=100)
p2=pl.scatter(normal[0,:], normal[1,:], c="red", s=100)
p3=pl.scatter(abnormal[0,:], abnormal[1,:], c="blue", s=100)
p4=pl.scatter(x_out[0,:], x_out[1,:], c=a, cmap=jet(), s=100)
_=pl.legend((p1, p2, p3), ["Training samples", "normal samples", "abnormal samples"], loc=2)
subplot(122)
c=pcolor(xx, yy, z)
title("One-class classification output")
_=gray()
_=contour(xx, yy, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
"""
Explanation: Now that everything is initialized, let's see MKLOneclass in action by applying it on the test data and on the X-Y grid.
End of explanation
"""
|
hamzamerzic/ml-playground | notebooks/two-layer-net.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Data generation obtained from http://cs231n.github.io/neural-networks-case-study/
def generate_data(N, K, plot=True):
    """Generate a K-class 2-D spiral dataset with N points per class.

    Parameters
    ----------
    N : int
        Number of points per class.
    K : int
        Number of classes.
    plot : bool, optional
        If True (default, preserving the original behaviour), scatter-plot
        the generated points.

    Returns
    -------
    X : (N*K, 2) float array of inputs.
    y : (N*K,) uint8 array of class labels in [0, K).
    """
    D = 2  # Dimensionality
    X = np.zeros((N * K, D))  # Data matrix (each row = single example)
    y = np.zeros(N * K, dtype='uint8')  # Class labels
    # `range` works on both Python 2 and 3 (the original `xrange` is Py2-only).
    for j in range(K):
        ix = range(N * j, N * (j + 1))
        r = np.linspace(0.0, 1, N)  # radius
        t = np.linspace(j * 8, (j + 1) * 8, N) + np.random.randn(N) * 0.2  # theta
        # Each class is one noisy spiral arm in polar coordinates.
        X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        y[ix] = j
    if plot:
        plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)  # Visualize
        plt.xlim([-1, 1])
        plt.ylim([-1, 1])
    return X, y
# Example:
generate_data(300, 3);
"""
Explanation: Two-layer neural network
In this notebook a two-layer neural network is implemented from scratch following the methodology of the course http://cs231n.github.io/.
The structure of the network is the following:
INPUT -> FC -> ReLU -> FC -> OUTPUT -> SOFTMAX LOSS.
The goal of this notebook is to store some of my thoughts obtained while going through the course. Especially notes about backpropagation in case of composition of functions, since I found it difficult to fully understand w.r.t. (with respect to) which variable the derivatives are computed at each stage.
Important concept presented here is distinguishing between the layer's input and output, which makes the understanding of backpropagation easier.
End of explanation
"""
# Initialization
def initialize(num_inputs, num_hidden, num_out=None):
    """He-style random initialisation of the two weight matrices.

    Parameters
    ----------
    num_inputs : int
        Number of input features (before the bias trick).
    num_hidden : int
        Hidden-layer width.
    num_out : int, optional
        Number of output classes; defaults to the notebook-level
        ``num_classes`` global for backward compatibility.

    Returns
    -------
    (W1, W2) with shapes (num_inputs+1, num_hidden) and (num_hidden+1, num_out).
    """
    if num_out is None:
        num_out = num_classes  # notebook-level global, kept for compatibility
    # +1 accounts for the bias trick (the bias is folded into the weights).
    # Scale by sqrt(2/fan_in) so every neuron has the same initial output
    # variance (He initialisation, as described in the cell below).
    # Bug fix: the original read `np.sqrt(2.0 / num_inputs + 1)`, which
    # computes sqrt(2/num_inputs + 1) — the parentheses were missing,
    # inconsistent with the W2 line.
    W1 = np.random.randn(num_inputs + 1, num_hidden) * np.sqrt(2.0 / (num_inputs + 1))
    W2 = np.random.randn(num_hidden + 1, num_out) * np.sqrt(2.0 / (num_hidden + 1))
    return W1, W2
"""
Explanation: The initial variance is scaled by a factor of $\sqrt[]{\frac{2}{N}}$, where $N$ is the number of inputs to each neuron in the layer (as per http://cs231n.github.io/neural-networks-2/), in order to provide the same initial output variance at each neuron.
End of explanation
"""
# Forward propagate
def forw_prop(X, W1, W2):
    """Forward pass of the two-layer net: FC -> ReLU -> FC -> softmax.

    Parameters
    ----------
    X : (n, num_inputs+1) inputs with the bias column already appended.
    W1, W2 : weight matrices (bias rows included via the bias trick).

    Returns
    -------
    (probs, h11, h12): class probabilities, hidden pre-activations, and
    hidden activations with a bias column appended.
    """
    # Hidden layer: affine transform followed by ReLU.
    h11 = X.dot(W1)
    h12 = np.maximum(0, h11)  # ReLU nonlinearity
    # Bias trick: append a column of ones so W2 can absorb the bias.
    h12 = np.c_[h12, np.ones(h12.shape[0])]
    # Final (score) layer.
    f = h12.dot(W2)
    # Softmax, shifted by the per-row maximum for numerical stability:
    # without the shift, exp() of large scores overflows to inf/NaN.
    # The shift cancels in the normalisation, so probabilities are unchanged.
    f = f - f.max(axis=1, keepdims=True)
    probs = np.exp(f)
    probs /= probs.sum(axis=1, keepdims=True)
    return probs, h11, h12
"""
Explanation: We use the notation $h_{i,1}$, $h_{i,2}$ to specify the output of $i-$th layer before and after a nonlinearity, respectively. For example, if the $i-$th hidden layer contains a sigmoid activation function, then $$h_{i,1}=h_{i-1,2}W_i$$ and $$h_{i,2}=\sigma({h_{i,1}}).$$ Additionally, the bias trick could be applied to the output with nonlinearity in order to implicitly account for the bias in the weights.
End of explanation
"""
# Compute the softmax loss http://cs231n.github.io/linear-classify/#softmax
def calc_loss(probs, y, W1, W2, reg):
    """Mean softmax cross-entropy over the batch plus an L2 weight penalty.

    `probs` are the predicted class probabilities, `y` the integer labels,
    and `reg` the regularisation strength lambda.
    """
    n = y.shape[0]
    # Cross-entropy: average negative log-probability of the true class.
    correct_logprobs = np.log(probs[np.arange(n), y])
    data_loss = -correct_logprobs.mean()
    # L2 regularisation: (lambda/2) * (||W1||^2 + ||W2||^2).
    reg_loss = 0.5 * reg * (np.sum(W1 ** 2) + np.sum(W2 ** 2))
    return data_loss + reg_loss
"""
Explanation: The loss function is defined as the mean of sample losses $$ L_i = -f_i[y_i] + \log\Sigma_j\, e^{f_i[j]},\; \text{where }\; f_i=x_i^TW.$$ The final loss is then: $$L = \frac{1}{N} \Sigma_i^N\, L_i + \frac{\lambda}{2}(||W_1||_2^2 + ||W_2||_2^2)$$
End of explanation
"""
# Backpropagate
def back_prop(probs, X, y, h11, h12, W1, W2, reg):
    """Backward pass: gradients of the loss w.r.t. W1 and W2.

    Arguments mirror forw_prop's inputs/outputs; `reg` is the L2 strength.
    Returns (dL_dW1, dL_dW2).
    """
    # Bug fix: use the actual batch size instead of the notebook-level
    # global `num_train` (which silently breaks for any other batch).
    n = y.shape[0]
    # Gradient of softmax + cross-entropy w.r.t. the scores f.
    # Copy so the caller's `probs` array is not mutated as a side effect.
    dL_df = probs.copy()
    dL_df[np.arange(n), y] -= 1
    dL_df /= n
    # Gradient w.r.t. W2, plus the L2 regularisation term.
    dL_dW2 = h12.T.dot(dL_df) + reg * W2
    # Gradient at the (biased) output of the hidden layer.
    dL_dh12 = dL_df.dot(W2.T)
    # Drop the bias column, then propagate through the ReLU:
    # the gradient is zero wherever the pre-activation was negative.
    dL_dh11 = dL_dh12[:, :-1]
    dL_dh11[h11 < 0] = 0
    dL_dW1 = X.T.dot(dL_dh11) + reg * W1
    return dL_dW1, dL_dW2
def accuracy(X, y, W1, W2):
    """Fraction of examples whose highest-scoring class matches the label y.

    Runs the same forward pass as forw_prop but skips the softmax, since
    argmax over raw scores gives the same prediction.
    """
    hidden = np.maximum(0, X.dot(W1))  # ReLU hidden activations
    hidden = np.c_[hidden, np.ones(hidden.shape[0])]  # bias trick
    scores = hidden.dot(W2)
    predictions = np.argmax(scores, axis=1)
    return np.mean(predictions == y)
"""
Explanation: During forward prop, we compose multiple functions to get the final output. Those functions could be simple dot products in case of weights, or complicated nonlinear functions within neurons. An important question when doing backpropagation then is w.r.t. what to differentiate when applying the chain rule?
For example, assume the final output is a composition of $f_1, f_2,$ and $f_3$, i.e. $f(X) = f_3(f_2(f_1(X)))$.
We could apply the chain rule directly:
$$\frac{\partial{f}}{\partial{X}} = \frac{\partial{f_3}}{\partial{f_2}}\frac{\partial{f_2}}{\partial{f_1}}\frac{\partial{f_1}}{\partial{X}},$$
or, for example, define $g = f_3 \circ f_2$ to get:
$$\frac{\partial{f}}{\partial{X}} = \frac{\partial{g}}{\partial{f_1}} \frac{\partial{f_1}}{\partial{X}}.$$
The common approach is to combine the nonlinear function(s) at the hidden layers, differentiate w.r.t. the output of the hidden layer, and back-propagate through the nonlinearity to get the derivative w.r.t. the input of the layer.
End of explanation
"""
# Hyperparameters.
reg = 0.001  # L2 regularisation strength
step_size = 0.1  # gradient-descent learning rate
num_hidden = 200  # hidden-layer width
data_per_class = 300 # Number of points per class
num_classes = 3 # Number of classes
X, y = generate_data(data_per_class, num_classes)
num_inputs = X.shape[1]
W1, W2 = initialize(num_inputs, num_hidden)
# Preprocess the data.
# Split data into train and test data (one third held out for testing).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33)
num_train = X_train.shape[0]
num_test = X_test.shape[0]
# The bias trick: append a constant-1 feature so the bias lives in W1.
X_train = np.c_[X_train, np.ones(num_train)]
X_test = np.c_[X_test, np.ones(num_test)]
"""
Explanation: Putting it all together
Now that all the necessary functions are defined, we can prepare the data and train the network.
End of explanation
"""
# Now we can perform gradient descent.
# Full-batch training: forward pass, loss, backward pass, parameter update.
for i in xrange(5001):
    probs, h11, h12 = forw_prop(X_train, W1, W2)
    loss = calc_loss(probs, y_train, W1, W2, reg)
    dW1, dW2 = back_prop(probs, X_train, y_train, h11, h12, W1, W2, reg)
    W1 -= step_size * dW1
    W2 -= step_size * dW2
    # Report progress every 500 steps (Python 2 print statement).
    if i % 500 == 0:
        print "Step %4d. Loss=%.3f, train accuracy=%.5f" % (i, loss, accuracy(X_train, y_train, W1, W2))
print "Test accuracy=%.5f" % accuracy(X_test, y_test, W1, W2)
"""
Explanation: Training steps
End of explanation
"""
# Plot the resulting classifier on the test data.
h = 0.02  # mesh step size
x_min, x_max = X_test[:, 0].min() - 1, X_test[:, 0].max() + 1
y_min, y_max = X_test[:, 1].min() - 1, X_test[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# Evaluate the network on every grid point (note the appended bias column),
# then colour each point by its argmax class to show the decision regions.
h = np.maximum(0, np.dot(np.c_[xx.ravel(), yy.ravel(), np.ones(xx.ravel().shape)], W1))
h = np.c_[h, np.ones(h.shape[0])]
Z = np.dot(h, W2)
Z = np.argmax(Z, axis=1)
Z = Z.reshape(xx.shape)
fig = plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max());
"""
Explanation: Visualization
End of explanation
"""
# Tinkering
# Small sanity checks of the numpy operations used above.
a = np.array([[-1, 4, 5], [2, 8, 0]])
print a
print np.sum(a, axis=1)  # row sums
a / a.sum(axis=1, keepdims=True).astype(float)  # row-normalise (as in softmax)
print np.maximum(0, a)  # elementwise ReLU
"""
Explanation: Tinkering with numpy
This part is used for tinkering with numpy to make sure operations are performed in the desired way and dimensions are preserved.
End of explanation
"""
|
AllenDowney/ThinkStats2 | examples/groupby_example.ipynb | gpl-3.0 | %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='white')
from thinkstats2 import Pmf, Cdf
import thinkstats2
import thinkplot
decorate = thinkplot.config
"""
Explanation: GroupBy examples
Allen Downey
MIT License
End of explanation
"""
%time gss = pd.read_hdf('../homeworks/gss.hdf5', 'gss')
gss.head()
def counts(series):
    """Frequency of each value in `series`, ordered by value (not by count)."""
    frequencies = series.value_counts(sort=False)
    return frequencies.sort_index()
"""
Explanation: Let's load the GSS dataset.
End of explanation
"""
counts(gss['year'])
"""
Explanation: The GSS interviews a few thousand respondents each year.
End of explanation
"""
counts(gss['grass'])
"""
Explanation: One of the questions they ask is "Do you think the use of marijuana should be made legal or not?"
The answer codes are:
1 Legal
2 Not legal
8 Don't know
9 No answer
0 Not applicable
Here is the distribution of responses for all years.
End of explanation
"""
# Recode the non-substantive answers (0 = not applicable, 8 = don't know,
# 9 = no answer) as missing.
# NOTE(review): `gss['grass'].replace(..., inplace=True)` mutates through a
# column selection; recent pandas versions warn about this chained pattern.
gss['grass'].replace([0,8,9], np.nan, inplace=True)
"""
Explanation: I'll replace "Don't know", "No answer", and "Not applicable" with NaN.
End of explanation
"""
# Recode "not legal" (2) as 0 so the column is a 0/1 indicator and its
# mean gives the fraction in favour of legalisation.
gss['grass'].replace(2, 0, inplace=True)
"""
Explanation: And replace 2, which represents "No", with 1. That way we can use mean to compute the fraction in favor.
End of explanation
"""
counts(gss['grass'])
"""
Explanation: Here are the value counts after replacement.
End of explanation
"""
gss['grass'].mean()
"""
Explanation: And here's the mean.
End of explanation
"""
grouped = gss.groupby('year')
grouped
"""
Explanation: So 30% of respondents thought marijuana should be legal, at the time they were interviewed.
Now we can see how that fraction depends on age, cohort (year of birth), and period (year of interview).
Group by year
First we'll group respondents by year.
End of explanation
"""
for name, group in grouped:
print(name, len(group))
"""
Explanation: The result in a DataFrameGroupBy object we can iterate through:
End of explanation
"""
for name, group in grouped:
print(name, group['grass'].mean())
"""
Explanation: And we can compute summary statistics for each group.
End of explanation
"""
grouped['grass']
"""
Explanation: Using a for loop can be useful for debugging, but it is more concise, more idiomatic, and faster to apply operations directly to the DataFrameGroupBy object.
For example, if you select a column from a DataFrameGroupBy, the result is a SeriesGroupBy that represents one Series for each group.
End of explanation
"""
for name, series in grouped['grass']:
print(name, series.mean())
"""
Explanation: You can loop through the SeriesGroupBy, but you normally don't.
End of explanation
"""
series = grouped['grass'].mean()
series
"""
Explanation: Instead, you can apply a function to the SeriesGroupBy; the result is a new Series that maps from group names to the results from the function; in this case, it's the fraction of support for each interview year.
End of explanation
"""
series.plot(color='C0')
decorate(xlabel='Year of interview',
ylabel='% in favor',
title='Should marijuana be made legal?')
"""
Explanation: Overall support for legalization has been increasing since 1990.
End of explanation
"""
counts(gss['cohort'])
"""
Explanation: Group by cohort
The variable cohort contains respondents' year of birth.
End of explanation
"""
grouped = gss.groupby('cohort')
series = grouped['grass'].mean()
series.plot(color='C1')
decorate(xlabel='Year of birth',
ylabel='% in favor',
title='Should marijuana be made legal?')
"""
Explanation: Pulling together the code from the previous section, we can plot support for legalization by year of birth.
End of explanation
"""
grouped = gss.groupby('age')
series = grouped['grass'].mean()
series.plot(color='C2')
decorate(xlabel='Age at interview',
ylabel='% in favor',
title='Should marijuana be made legal?')
"""
Explanation: Later generations are more likely to support legalization than earlier generations.
Group by age
Finally, let's see how support varies with age at time of interview.
End of explanation
"""
|
wuafeing/Python3-Tutorial | 01 data structures and algorithms/01.06 map keys to multiple values in dict.ipynb | gpl-3.0 | d = {
"a" : [1, 2, 3],
"b" : [4, 5]
}
# A key mapping to multiple values using sets: duplicates are discarded
# and insertion order is not preserved (contrast with the list version above).
e = {
    "a" : {1, 2, 3},
    "b" : {4, 5}
}
"""
Explanation: Previous
1.6 字典中的键映射多个值
问题
怎样实现一个键对应多个值的字典(也叫 multidict )?
解决方案
一个字典就是一个键对应一个单值的映射。如果你想要一个键映射多个值,那么你就需要将这多个值放到另外的容器中, 比如列表或者集合里面。比如,你可以像下面这样构造这样的字典:
End of explanation
"""
from collections import defaultdict
# defaultdict(list) creates the list automatically on first access,
# so appending never needs an explicit existence check.
d = defaultdict(list)
d["a"].append(1)
d["a"].append(2)
d["b"].append(4)
# defaultdict(set) behaves the same, but discards duplicate values.
d = defaultdict(set)
d["a"].add(1)
d["a"].add(2)
d["b"].add(4)
"""
Explanation: 选择使用列表还是集合取决于你的实际需求。如果你想保持元素的插入顺序就应该使用列表, 如果想去掉重复元素就使用集合(并且不关心元素的顺序问题)。
你可以很方便的使用 collections 模块中的 defaultdict 来构造这样的字典。 defaultdict 的一个特征是它会自动初始化每个 key 刚开始对应的值,所以你只需要关注添加元素操作了。比如:
End of explanation
"""
d = {} # A regular dictionary
# setdefault() inserts the default (an empty list) only when the key is
# missing, then returns the stored value — so append always has a list.
d.setdefault("a", []).append(1)
d.setdefault("a", []).append(2)
d.setdefault("b", []).append(4)
"""
Explanation: 需要注意的是, defaultdict 会自动为将要访问的键(就算目前字典中并不存在这样的键)创建映射实体。 如果你并不需要这样的特性,你可以在一个普通的字典上使用 setdefault() 方法来代替。比如:
End of explanation
"""
|
def gauss1d(x, mu, sig):
    """Gaussian pdf N(mu, sig^2) evaluated at x.

    Bug fix: the original exponent read ``-(x-mu)**2/sig*2/2.``, i.e.
    -(x-mu)^2/sig — the ``*2/2`` cancels and sigma is never squared.
    The correct Gaussian exponent is -(x-mu)^2 / (2 sig^2).
    """
    return np.exp(-(x - mu)**2 / sig**2 / 2.) / np.sqrt(2 * np.pi) / sig
def pltgauss1d(sig=1):
    """Plot the Gaussian pdf N(0, sig^2) over [-4, 4].

    Vertical lines mark the mean (solid), +/- 1 sigma (dashed) and
    +/- 2 sigma (dotted).
    """
    mu=0
    x = np.r_[-4:4:101j]
    pl.figure(figsize=(10,7))
    pl.plot(x, gauss1d(x,mu,sig),'k-');
    pl.axvline(mu,c='k',ls='-');
    pl.axvline(mu+sig,c='k',ls='--');
    pl.axvline(mu-sig,c='k',ls='--');
    pl.axvline(mu+2*sig,c='k',ls=':');
    pl.axvline(mu-2*sig,c='k',ls=':');
    pl.xlim(x.min(),x.max());
    pl.ylim(0,1);
    pl.xlabel(r'$y$');
    pl.ylabel(r'$p(y)$');
    return

# Interactive slider over the standard deviation.
interact(pltgauss1d,
         sig=widgets.FloatSlider(value=1.0,
                                 min=0.5,
                                 max=2.0,
                                 step=0.25,
                                 description=r'$\sigma$',
                                 readout_format='.2f'));
"""
Explanation: Gaussian process regression
Lecture 1
Suzanne Aigrain, University of Oxford
LSST DSFP Session 4, Seattle, Sept 2017
Lecture 1: Introduction and basics
Tutorial 1: Write your own GP code
Lecture 2: Examples and practical considerations
Tutorial 3: Useful GP modules
Lecture 3: Advanced applications
Why GPs?
flexible, robust probabilistic regression and classification tools.
applied across a wide range of fields, from finance to zoology.
useful for data containing non-trivial stochastic signals or noise.
time-series data: causation implies correlation, so noise always correlated.
increasingly popular in astronomy [mainly time-domain, but not just].
Spitzer exoplanet transits and eclipses (Evans et al. 2015)
<img src="images/Evans_Spitzer.png" width="800">
GPz photometric redshifts (Almosallam, Jarvis & Roberts 2016)
<img src="images/Almosallam_GPz.png" width="600">
What is a GP?
A Gaussian process is a collection of random variables, any
finite number of which have a joint Gaussian distribution.
Consider a scalar variable $y$, drawn from a Gaussian distribution with mean $\mu$ and variance $\sigma^2$:
$$
p(y) = \frac{1}{\sqrt{2 \pi} \sigma} \exp \left[ - \frac{(y-\mu)^2}{2 \sigma^2} \right].
$$
As a short hand, we write: $y \sim \mathcal{N}(\mu,\sigma^2)$.
End of explanation
"""
def gauss2d(x1, x2, mu1, mu2, sig1, sig2, rho):
    """Bivariate Gaussian pdf with means mu1/mu2, standard deviations
    sig1/sig2 and correlation coefficient rho, evaluated at (x1, x2)."""
    u1 = (x1 - mu1) / sig1
    u2 = (x2 - mu2) / sig2
    # Quadratic form of the standardised coordinates.
    quad = u1**2 + u2**2 - 2 * rho * u1 * u2
    norm = 2 * np.pi * sig1 * sig2 * np.sqrt(1 - rho**2)
    return np.exp(-quad / (2 * (1 - rho**2))) / norm
def pltgauss2d(rho=0,show_cond=0):
    """Plot a standard bivariate Gaussian with correlation `rho`.

    Main panel: joint density contours of (y1, y2); side panels: the two
    marginal densities. If `show_cond` is true, the conditional density
    p(y1 | y2 = -1) is overplotted in red.
    """
    mu1, sig1 = 0,1
    mu2, sig2 = 0,1
    y2o = -1  # the observed value of y2 used for the conditional
    x1 = np.r_[-4:4:101j]
    x2 = np.r_[-4:4:101j]
    x22d,x12d = np.mgrid[-4:4:101j,-4:4:101j]
    y = gauss2d(x12d,x22d,mu1,mu2,sig1,sig2,rho)
    y1 = gauss1d(x1,mu1,sig1)
    y2 = gauss1d(x2,mu2,sig2)
    # Conditional mean and sigma of y1 given y2 = y2o.
    # NOTE(review): with sig1 = sig2 = 1 (as set above) these reduce to the
    # standard conditional-Gaussian formulae with rho acting as the
    # covariance; for general sigmas the variance would be sig1^2 (1-rho^2).
    mu12 = mu1+rho*(y2o-mu2)/sig2**2
    sig12 = np.sqrt(sig1**2-rho**2*sig2**2)
    y12 = gauss1d(x1,mu12,sig12)
    pl.figure(figsize=(10,10))
    # Joint density contours (main panel).
    ax1 = pl.subplot2grid((3,3),(1,0),colspan=2,rowspan=2,aspect='equal')
    v = np.array([0.02,0.1,0.3,0.6]) * y.max()
    CS = pl.contour(x1,x2,y,v,colors='k')
    if show_cond: pl.axhline(y2o,c='r')
    pl.xlabel(r'$y_1$');
    pl.ylabel(r'$y_2$');
    pl.xlim(x1.min(),x1.max())
    ax1.xaxis.set_major_locator(pl.MaxNLocator(5, prune = 'both'))
    ax1.yaxis.set_major_locator(pl.MaxNLocator(5, prune = 'both'))
    # Marginal of y1 (top panel), with the conditional overplotted in red.
    ax2 = pl.subplot2grid((3,3),(0,0),colspan=2,sharex=ax1)
    pl.plot(x1,y1,'k-')
    if show_cond: pl.plot(x1,y12,'r-')
    pl.ylim(0,0.8)
    pl.ylabel(r'$p(y_1)$')
    pl.setp(ax2.get_xticklabels(), visible=False)
    ax2.xaxis.set_major_locator(pl.MaxNLocator(5, prune = 'both'))
    ax2.yaxis.set_major_locator(pl.MaxNLocator(4, prune = 'upper'))
    pl.xlim(x1.min(),x1.max())
    # Marginal of y2 (right-hand panel, axes swapped).
    ax3 = pl.subplot2grid((3,3),(1,2),rowspan=2,sharey=ax1)
    pl.plot(y2,x2,'k-')
    if show_cond: pl.axhline(y2o,c='r')
    pl.ylim(x2.min(),x2.max());
    pl.xlim(0,0.8);
    pl.xlabel(r'$p(y_2)$')
    pl.setp(ax3.get_yticklabels(), visible=False)
    ax3.xaxis.set_major_locator(pl.MaxNLocator(4, prune = 'upper'))
    ax3.yaxis.set_major_locator(pl.MaxNLocator(5, prune = 'both'))
    pl.subplots_adjust(hspace=0,wspace=0)
    return

# Interactive controls for the correlation and the conditional overlay.
interact(pltgauss2d,
         rho=widgets.FloatSlider(min=-0.8,max=0.8,step=0.4,description=r'$\rho$',value=0),
         show_cond=widgets.Checkbox(value=True,description='show conditional distribution'));
"""
Explanation: Now let us consider a pair of variables $y_1$ and $y_2$, drawn from a bivariate Gaussian distribution. The joint probability density for $y_1$ and $y_2$ is:
$$
\left[ \begin{array}{l} y_1 \ y_2 \end{array} \right] \sim \mathcal{N} \left(
\left[ \begin{array}{l} \mu_1 \ \mu_2 \end{array} \right] ,
\left[ \begin{array}{ll}
\sigma_1^2 & C \
C & \sigma_2^2
\end{array} \right]
\right),
$$
where $C = {\rm cov}(y_1,y_2)$ is the covariance between $y_1$ and $y_2$.
The second term on the right hand side is the covariance matrix, $K$.
We now use two powerful identities of Gaussian distributions to elucidate the relationship between $y_1$ and $y_2$.
The marginal distribution of $y_1$ describes what we know about $y_1$ in the absence of any other information about $y_2$, and is simply:
$$
p(y_1)= \mathcal{N} (\mu_1,\sigma_1^2).
$$
If we know the value of $y_2$, the probability density for $y_1$ collapses to the the conditional distribution of $y_1$ given $y_2$:
$$
p(y_1 \mid y_2) = \mathcal{N} \left( \mu_1 + C (y_2-\mu_2)/\sigma_2^2, \sigma_1^2-C^2/\sigma_2^2 \right).
$$
If $K$ is diagonal, i.e. if $C=0$, $p(y_1 \mid y_2) = p(y_1)$. Measuring $y_2$ doesn't teach us anything about $y_1$. The two variables are uncorrelated.
If the variables are correlated ($C \neq 0$), measuring $y_2$ does alter our knowledge of $y_1$: it modifies the mean and reduces the variance.
End of explanation
"""
def SEKernel(par, x1, x2):
    """Squared-exponential kernel matrix K_ij = A exp(-Gamma (x1_i - x2_j)^2).

    `par` is (A, Gamma); `x1` and `x2` are 1-D input arrays.
    """
    amplitude, inv_scale = par
    sq_dist = cdist(x1.reshape(-1, 1), x2.reshape(-1, 1),
                    metric='sqeuclidean')
    return amplitude * np.exp(-inv_scale * sq_dist)
# Prior over two points: zero mean and variance A, before any data are seen.
A = 1.0
Gamma = 0.01
x = np.array([-1,1])
K = SEKernel([A,Gamma],x,x)
m = np.zeros(len(x))
sig = np.sqrt(np.diag(K))
pl.figure(figsize=(15,7))
pl.subplot(121)
# Draw each point's prior mean as a short bar with a +/- 1 sigma band.
for i in range(len(x)):
    pl.plot([x[i]-0.1,x[i]+0.1],[m[i],m[i]],'k-')
    pl.fill_between([x[i]-0.1,x[i]+0.1],
                    [m[i]+sig[i],m[i]+sig[i]],
                    [m[i]-sig[i],m[i]-sig[i]],color='k',alpha=0.2)
pl.xlim(-2,2)
pl.ylim(-2,2)
pl.xlabel(r'$x$')
pl.ylabel(r'$y$');
def Pred_GP(CovFunc, CovPar, xobs, yobs, eobs, xtest):
    """GP predictive (conditional) distribution at the test inputs.

    Parameters
    ----------
    CovFunc : callable
        ``CovFunc(CovPar, a, b)`` must return the covariance matrix
        between input sets ``a`` and ``b``.
    CovPar : covariance-function hyper-parameters, passed through.
    xobs, yobs : observed inputs and outputs (training set).
    eobs : white-noise sigma on the observations (scalar or per-point).
    xtest : inputs at which to evaluate the predictive distribution.

    Returns
    -------
    (m, cov): predictive mean vector and covariance matrix.
    """
    # Covariance of pairs of observed inputs, plus white observational noise.
    K = CovFunc(CovPar, xobs, xobs)
    K += np.identity(xobs.shape[0]) * eobs**2
    # Test/test and test/train covariances.
    Kss = CovFunc(CovPar, xtest, xtest)
    Ks = CovFunc(CovPar, xtest, xobs)
    # Solve the linear systems instead of forming K^-1 explicitly (the
    # original used inv(K)): same mathematics, but numerically more stable.
    alpha = np.linalg.solve(K, yobs)
    m = np.dot(Ks, alpha)
    cov = Kss - np.dot(Ks, np.linalg.solve(K, Ks.T))
    return m, cov
# Condition the GP on a single, nearly noiseless observation at x = -1 and
# show the updated (posterior) mean and uncertainty at the test point x = +1.
xobs = np.array([-1])
yobs = np.array([1.0])
eobs = 0.0001
pl.subplot(122)
pl.errorbar(xobs,yobs,yerr=eobs,capsize=0,fmt='k.')
x = np.array([1])
m,C=Pred_GP(SEKernel,[A,Gamma],xobs,yobs,eobs,x)
sig = np.sqrt(np.diag(C))
# Posterior mean bar with a +/- 1 sigma band, as in the prior panel.
for i in range(len(x)):
    pl.plot([x[i]-0.1,x[i]+0.1],[m[i],m[i]],'k-')
    pl.fill_between([x[i]-0.1,x[i]+0.1],
                    [m[i]+sig[i],m[i]+sig[i]],
                    [m[i]-sig[i],m[i]-sig[i]],color='k',alpha=0.2)
pl.xlim(-2,2)
pl.ylim(-2,2)
pl.xlabel(r'$x$')
pl.ylabel(r'$y$');
"""
Explanation: To make the relation to time-series data a bit more obvious, let's plot the two variables side by side, then see what happens to one variable when we observe (fix) the other.
End of explanation
"""
# Condition on three observations (one precise, two noisy). Left panel:
# predictions at a handful of test points; right panel: a dense predictive
# mean curve with its uncertainty band.
xobs = np.array([-1,1,2])
yobs = np.array([1,-1,0])
eobs = np.array([0.0001,0.1,0.1])
pl.figure(figsize=(15,7))
pl.subplot(121)
pl.errorbar(xobs,yobs,yerr=eobs,capsize=0,fmt='k.')
Gamma = 0.5
x = np.array([-2.5,-2,-1.5,-0.5, 0.0, 0.5,1.5,2.5])
m,C=Pred_GP(SEKernel,[A,Gamma],xobs,yobs,eobs,x)
sig = np.sqrt(np.diag(C))
for i in range(len(x)):
    pl.plot([x[i]-0.1,x[i]+0.1],[m[i],m[i]],'k-')
    pl.fill_between([x[i]-0.1,x[i]+0.1],
                    [m[i]+sig[i],m[i]+sig[i]],
                    [m[i]-sig[i],m[i]-sig[i]],color='k',alpha=0.2)
pl.xlim(-3,3)
pl.ylim(-3,3)
pl.xlabel(r'$x$')
pl.ylabel(r'$y$');
pl.subplot(122)
pl.errorbar(xobs,yobs,yerr=eobs,capsize=0,fmt='k.')
x = np.linspace(-3,3,100)
m,C=Pred_GP(SEKernel,[A,Gamma],xobs,yobs,eobs,x)
sig = np.sqrt(np.diag(C))
# Continuous predictive mean with a +/- 1 sigma shaded band.
pl.plot(x,m,'k-')
pl.fill_between(x,m+sig,m-sig,color='k',alpha=0.2)
pl.xlim(-3,3)
pl.ylim(-3,3)
pl.xlabel(r'$x$')
pl.ylabel(r'$y$');
"""
Explanation: Now consider $N$ variables drawn from a multivariate Gaussian distribution:
$$
\boldsymbol{y} \sim \mathcal{N} (\boldsymbol{m},K)
$$
where $y = (y_1,y_2,\ldots,y_N)^T$, $\boldsymbol{m} = (m_1,m_2,\ldots,m_N)^T$ is the mean vector, and $K$ is an $N \times N$ positive semi-definite covariance matrix, with elements $K_{ij}={\rm cov}(y_i,y_j)$.
A Gaussian process is an extension of this concept to infinite $N$, giving rise to a probability distribution over functions.
This last generalisation may not be obvious conceptually, but in practice only ever deal with finite samples.
End of explanation
"""
def kernel_SE(X1, X2, par):
    """Squared-exponential kernel: K = A exp(-Gamma d^2).

    `par` holds (log10 A, log10 Gamma); returns a numpy matrix.
    """
    amp = 10.0 ** par[0]
    gamma = 10.0 ** par[1]
    sq_dist = cdist(X1, X2, 'sqeuclidean')
    return np.matrix(amp * np.exp(-gamma * sq_dist))
def kernel_Mat32(X1, X2, par):
    """Matern-3/2 kernel: K = A (1 + sqrt(3) d/l) exp(-sqrt(3) d/l).

    `par` holds (log10 A, log10 l); returns a numpy matrix.
    """
    amp = 10.0 ** par[0]
    scale = 10.0 ** par[1]
    dist = cdist(X1, X2, 'euclidean')
    arg = np.sqrt(3) * np.abs(dist) / scale
    return np.matrix(amp * (1 + arg) * np.exp(-arg))
def kernel_RQ(X1, X2, par):
    """Rational-quadratic kernel: K = A (1 + d^2 / (2 alpha l))^(-alpha).

    `par` holds (log10 A, log10 l, alpha); returns a numpy matrix.
    """
    amp = 10.0 ** par[0]
    scale = 10.0 ** par[1]
    alpha = par[2]
    sq_dist = cdist(X1, X2, 'sqeuclidean')
    return np.matrix(amp * (1 + sq_dist / (2 * alpha * scale)) ** (-alpha))
def kernel_Per(X1, X2, par):
    """Periodic kernel: K = A exp(-G sin^2(pi d / period)).

    `par` holds (log10 A, log10 G, period); returns a numpy matrix.
    """
    amp = 10.0 ** par[0]
    gamma = 10.0 ** par[1]
    period = par[2]
    dist = cdist(X1, X2, 'euclidean')
    phase = np.sin(np.pi * dist / period)
    return np.matrix(amp * np.exp(-gamma * phase ** 2))
def kernel_QP(X1, X2, par):
    """Quasi-periodic kernel: a periodic term multiplied by an SE decay.

    K = A exp(-G sin^2(pi d / period) - G2 d^2).
    `par` holds (log10 A, log10 G, period, log10 G2); returns a numpy matrix.
    """
    amp = 10.0 ** par[0]
    per_gamma = 10.0 ** par[1]
    period = par[2]
    se_gamma = 10.0 ** par[3]
    dist = cdist(X1, X2, 'euclidean')
    sq_dist = cdist(X1, X2, 'sqeuclidean')
    phase = np.sin(np.pi * dist / period)
    return np.matrix(amp * np.exp(-per_gamma * phase ** 2 - se_gamma * sq_dist))
def add_wn(K, lsig):
    """Add white noise to a covariance matrix: K + (10**lsig)^2 * I."""
    sigma = 10.0 ** lsig
    n = K.shape[0]
    return K + sigma ** 2 * np.identity(n)
def get_kernel(name):
    """Map a short kernel name ('SE', 'RQ', 'M32', 'Per', 'QP') to its
    covariance function, falling back to the squared exponential."""
    if name == 'SE': return kernel_SE
    elif name == 'RQ': return kernel_RQ
    elif name == 'M32': return kernel_Mat32
    elif name == 'Per': return kernel_Per
    elif name == 'QP': return kernel_QP
    else:
        # Python 2 print statement (this notebook targets Python 2).
        print 'No kernel called %s - using SE' % name
        return kernel_SE
def pltsamples1(par0=0.0, par1=0.0, wn = 0.0):
    """Show the SE covariance matrix and three prior sample functions.

    `par0`/`par1` are log10 of the SE amplitude and inverse length scale;
    `wn` is log10 of the white-noise sigma added to the diagonal.
    """
    x = np.r_[-5:5:201j]
    X = np.matrix([x]).T # scipy.spatial.distance expects matrices
    kernel = get_kernel('SE')
    K = kernel(X,X,[par0,par1])
    K = add_wn(K,wn)
    fig=pl.figure(figsize=(10,4))
    ax1 = pl.subplot2grid((1,3), (0, 0), aspect='equal')
    pl.imshow(np.sqrt(K),interpolation='nearest',vmin=0,vmax=10)
    pl.title('Covariance matrix')
    ax2 = pl.subplot2grid((1,3), (0,1),colspan=2)
    # Fixed seed so the three sample paths are reproducible across calls.
    np.random.seed(0)
    for i in range(3):
        y = np.random.multivariate_normal(np.zeros(len(x)),K)
        pl.plot(x,y-i*2)  # vertical offset keeps the samples legible
    pl.xlim(-5,5)
    pl.ylim(-8,5)
    pl.xlabel('x')
    pl.ylabel('y')
    pl.title('Samples from %s prior' % 'SE')
    pl.tight_layout()

# Sliders over the (log) kernel hyper-parameters.
interact(pltsamples1,
         par0=widgets.FloatSlider(min=-1,max=1,step=0.5,description=r'$\log_{10} A$',value=0),
         par1=widgets.FloatSlider(min=-1,max=1,step=0.5,description=r'$\log_{10} \Gamma$',value=0),
         wn=widgets.FloatSlider(min=-2,max=0,step=1,description=r'$\log_{10} \sigma$',value=-2)
        );
"""
Explanation: Textbooks
A good, detailed reference is Gaussian Processes for Machine Learning by C. E. Rasmussen & C. Williams, MIT Press, 2006.
The examples in the book are generated using the Matlab package GPML.
A more formal definition
A Gaussian process is completely specified by its mean function and covariance function.
We define the mean function $m(x)$ and the covariance function $k(x,x')$ of a real process $y(x)$ as
$$
\begin{array}{rcl}
m(x) & = & \mathbb{E}[y(x)], \\
k(x,x') & = & \mathrm{cov}(y(x),y(x'))=\mathbb{E}[(y(x) - m(x))(y(x') - m(x'))].
\end{array}
$$
A very common covariance function is the squared exponential, or radial basis function (RBF) kernel
$$
K_{ij}=k(x_i,x_j)=A \exp\left[ - \Gamma (x_i-x_j)^2 \right],
$$
which has 2 parameters: $A$ and $\Gamma$.
We then write the Gaussian process as
$$
y(x) \sim \mathcal{GP}(m(x), k(x,x'))
$$
Here we are implicitly assuming the inputs $x$ are one-dimensional, e.g. $x$ might represent time. However, the input space can have more than one dimension. We will see an example of a GP with multi-dimensional inputs later.
The prior
Now consider a finite set of inputs $\boldsymbol{x}$, with corresponding outputs $\boldsymbol{y}$.
The joint distribution of $\boldsymbol{y}$ given $\boldsymbol{x}$, $m$ and $k$ is
$$
\mathrm{p}(\boldsymbol{y} \mid \boldsymbol{x},m,k) = \mathcal{N}( \boldsymbol{m},K),
$$
where $\boldsymbol{m}=m(\boldsymbol{x})$ is the mean vector,
and $K$ is the covariance matrix, with elements $K_{ij} = k(x_i,x_j)$.
Test and training sets
Suppose we have an (observed) training set $(\boldsymbol{x},\boldsymbol{y})$.
We are interested in some other test set of inputs $\boldsymbol{x}_*$.
The joint distribution over the training and test sets is
$$
\mathrm{p} \left( \left[ \begin{array}{l} \boldsymbol{y} \\ \boldsymbol{y}_* \end{array} \right] \right)
= \mathcal{N} \left( \left[ \begin{array}{l} \boldsymbol{m} \\ \boldsymbol{m}_* \end{array} \right],
\left[ \begin{array}{ll} K & K_* \\ K_*^T & K_{**} \end{array} \right] \right),
$$
where $\boldsymbol{m}_* = m(\boldsymbol{x}_*)$, $K_{**,ij} = k(x_{*,i},x_{*,j})$ and $K_{*,ij} = k(x_i,x_{*,j})$.
For simplicity, assume the mean function is zero everywhere: $\boldsymbol{m}=\boldsymbol{0}$. We will consider non-trivial mean functions later.
The conditional distribution
The conditional distribution for the test set given the training set is:
$$
\mathrm{p} ( \boldsymbol{y}_* \mid \boldsymbol{y},k) = \mathcal{N} (
K_*^T K^{-1} \boldsymbol{y}, \; K_{**} - K_*^T K^{-1} K_* ).
$$
This is also known as the predictive distribution, because it can be use to predict future (or past) observations.
More generally, it can be used for interpolating the observations to any desired set of inputs.
This is one of the most widespread applications of GPs in some fields (e.g. kriging in geology, economic forecasting, ...)
Adding white noise
Real observations always contain a component of white noise, which we need to account for, but don't necessarily want to include in the predictions.
If the white noise variance $\sigma^2$ is constant, we can write
$$
\mathrm{cov}(y_i,y_j)=k(x_i,x_j)+\delta_{ij} \sigma^2,
$$
and the conditional distribution becomes
$$
\mathrm{p} ( \boldsymbol{y}_* \mid \boldsymbol{y},k) = \mathcal{N} (
K_*^T (K + \sigma^2 \mathbb{I})^{-1} \boldsymbol{y}, \; K_{**} - K_*^T (K + \sigma^2 \mathbb{I})^{-1} K_* ).
$$
In real life, we may need to learn $\sigma$ from the data, alongside the other contribution to the covariance matrix.
We assumed constant white noise, but it's trivial to allow for different $\sigma$ for each data point.
Single-point prediction
Let us look more closely at the predictive distribution for a single test point $x_*$.
It is a Gaussian with mean
$$
\overline{y}_* = \boldsymbol{k}_*^T (K + \sigma^2 \mathbb{I})^{-1} \boldsymbol{y}
$$
and variance
$$
\mathbb{V}[y_*] = k(x_*,x_*) - \boldsymbol{k}_*^T (K + \sigma^2 \mathbb{I})^{-1} \boldsymbol{k}_*,
$$
where $\boldsymbol{k}_*$ is the vector of covariances between the test point and the training points.
Notice the mean is a linear combination of the observations: the GP is a linear predictor.
It is also a linear combination of covariance functions, each centred on a training point:
$$
\overline{y}_* = \sum_{i=1}^N \alpha_i k(x_i,x_*),
$$
where $\boldsymbol{\alpha} = (K + \sigma^2 \mathbb{I})^{-1} \boldsymbol{y}$.
The likelihood
The likelihood of the data under the GP model is simply:
$$
\mathrm{p}(\boldsymbol{y} \,|\, \boldsymbol{x}) = \mathcal{N}(\boldsymbol{y} \, | \, \boldsymbol{0},K + \sigma^2 \mathbb{I}).
$$
This is a measure of how well the model explains, or predicts, the training set.
In some textbooks this is referred to as the marginal likelihood.
This arises if one considers the observed $\boldsymbol{y}$ as noisy realisations of a latent (unobserved) Gaussian process $\boldsymbol{f}$.
The term marginal refers to marginalisation over the function values $\boldsymbol{f}$:
$$
\mathrm{p}(\boldsymbol{y} \,|\, \boldsymbol{x}) = \int \mathrm{p}(\boldsymbol{y} \,|\, \boldsymbol{f},\boldsymbol{x}) \, \mathrm{p}(\boldsymbol{f} \,|\, \boldsymbol{x}) \, \mathrm{d}\boldsymbol{f},
$$
where
$$
\mathrm{p}(\boldsymbol{f} \,|\, \boldsymbol{x}) = \mathcal{N}(\boldsymbol{f} \, | \, \boldsymbol{0},K)
$$
is the prior, and
$$
\mathrm{p}(\boldsymbol{y} \,|\, \boldsymbol{f},\boldsymbol{x}) = \mathcal{N}(\boldsymbol{y} \, | \, \boldsymbol{f},\sigma^2 \mathbb{I})
$$
is the likelihood.
Parameters and hyper-parameters
The parameters of the covariance and mean function as known as the hyper-parameters of the GP.
This is because the actual parameters of the model are the function values, $\boldsymbol{f}$, but we never explicitly deal with them: they are always marginalised over.
Conditioning the GP...
...means evaluating the conditional (or predictive) distribution for a given covariance matrix (i.e. covariance function and hyper-parameters), and training set.
Training the GP...
...means maximising the likelihood of the model with respect to the hyper-parameters.
The kernel trick
Consider a linear basis model with arbitrarily many basis functions, or features, $\Phi(x)$, and a (Gaussian) prior $\Sigma_{\mathrm{p}}$ over the basis function weights.
One ends up with exactly the same expressions for the predictive distribution and the likelihood so long as:
$$
k(\boldsymbol{x},\boldsymbol{x'}) = \Phi(\boldsymbol{x})^{\mathrm{T}} \Sigma_{\mathrm{p}} \Phi(\boldsymbol{x'}),
$$
or, writing $\Psi(\boldsymbol{x}) = \Sigma_{\mathrm{p}}^{1/2} \Phi(\boldsymbol{x})$,
$$
k(\boldsymbol{x},\boldsymbol{x'}) = \Psi(\boldsymbol{x}) \cdot \Psi(\boldsymbol{x'}),
$$
Thus the covariance function $k$ enables us to go from a (finite) input space to a (potentially infinite) feature space. This is known as the kernel trick and the covariance function is often referred to as the kernel.
Non-zero mean functions
In general (and in astronomy applications in particular) we often want to use non-trivial mean functions.
To do this simply replace $\boldsymbol{y}$ by $\boldsymbol{r}=\boldsymbol{y}-\boldsymbol{m}$ in the expressions for predictive distribution and likelihood.
The mean function represents the deterministic component of the model
- e.g.: a linear trend, a Keplerian orbit, a planetary transit, ...
The covariance function encodes the stochastic component.
- e.g.: instrumental noise, stellar variability
Covariance functions
The only requirement for the covariance function is that it should return a positive semi-definite covariance matrix.
The simplest covariance functions have two parameters: one input and one output variance (or scale). The form of the covariance function controls the degree of smoothness.
The squared exponential
The simplest, most widely used kernel is the squared exponential:
$$
k_{\rm SE}(x,x') = A \exp \left[ - \Gamma (x-x')^2 \right].
$$
This gives rise to smooth functions with variance $A$ and inverse scale (characteristic length scale) $A$ and output scale (amplitude) $l$.
End of explanation
"""
# Function to plot samples from kernel
def pltsamples2(par2=0.5, kernel_shortname='SE'):
    """Show the covariance matrix and three prior samples for a chosen kernel.

    `par2` is the kernel's third hyper-parameter (e.g. the RQ index alpha);
    the amplitude and scale parameters are fixed at log10 = 0.
    """
    x = np.r_[-5:5:201j]
    X = np.matrix([x]).T # scipy.spatial.distance expects matrices
    kernel = get_kernel(kernel_shortname)
    K = kernel(X,X,[0.0,0.0,par2])
    fig=pl.figure(figsize=(10,4))
    ax1 = pl.subplot2grid((1,3), (0, 0), aspect='equal')
    pl.imshow(np.sqrt(K),interpolation='nearest',vmin=0,vmax=10)
    pl.title('Covariance matrix')
    ax2 = pl.subplot2grid((1,3), (0,1),colspan=2)
    # Fixed seed so the sample paths are reproducible across calls.
    np.random.seed(0)
    for i in range(3):
        y = np.random.multivariate_normal(np.zeros(len(x)),K)
        pl.plot(x,y-i*2)  # vertical offset keeps the samples legible
    pl.xlim(-5,5)
    pl.ylim(-8,5)
    pl.xlabel('x')
    pl.ylabel('y')
    pl.title('Samples from %s prior' % kernel_shortname)
    pl.tight_layout()

interact(pltsamples2,
         par2=widgets.FloatSlider(min=0.25,max=1,step=0.25,description=r'$\alpha$',value=0.5),
         kernel_shortname=widgets.RadioButtons(options=['SE','M32','RQ'], value='SE',description='kernel')
        );
"""
Explanation: The Matern family
The Matern 3/2 kernel
$$
k_{3/2}(x,x')= A \left( 1 + \frac{\sqrt{3}r}{l} \right) \exp \left( - \frac{\sqrt{3}r}{l} \right),
$$
where $r =|x-x'|$.
It produces somewhat rougher behaviour, because it is only differentiable once w.r.t. $r$ (whereas the SE kernel is infinitely differentiable). There is a whole family of Matern kernels with varying degrees of roughness.
The rational quadratic kernel
is equivalent to a squared exponential with a powerlaw distribution of input scales
$$
k_{\rm RQ}(x,x') = A \left(1 + \frac{r^2}{2 \alpha l^2} \right)^{-\alpha},
$$
where $\alpha$ is the index of the power law.
This is useful to model data containing variations on a range of timescales with just one extra parameter.
End of explanation
"""
def pltsamples3(par2=2.0, par3=2.0, kernel_shortname='Per'):
    """Plot a kernel's covariance matrix next to three GP prior samples.

    The kernel takes four hyper-parameters; the first two are fixed at zero
    and (par2, par3) supply the remaining pair.
    """
    xs = np.r_[-5:5:201j]
    Xmat = np.matrix([xs]).T  # scipy.spatial.distance expects matrices
    kern = get_kernel(kernel_shortname)
    cov = kern(Xmat, Xmat, [0.0, 0.0, par2, par3])

    fig = pl.figure(figsize=(10, 4))

    # Covariance matrix on the left.
    ax_cov = pl.subplot2grid((1, 3), (0, 0), aspect='equal')
    pl.imshow(np.sqrt(cov), interpolation='nearest', vmin=0, vmax=10)
    pl.title('Covariance matrix')

    # Three prior draws on the right, each shifted down so they don't overlap.
    ax_samples = pl.subplot2grid((1, 3), (0, 1), colspan=2)
    np.random.seed(0)
    zero_mean = np.zeros(len(xs))
    for offset in range(3):
        draw = np.random.multivariate_normal(zero_mean, cov)
        pl.plot(xs, draw - offset * 2)
    pl.xlim(-5, 5)
    pl.ylim(-8, 5)
    pl.xlabel('x')
    pl.ylabel('y')
    pl.title('Samples from %s prior' % kernel_shortname)
    pl.tight_layout()
# Interactive sliders for the period P and log-decay, plus a kernel selector.
interact(pltsamples3,
         par2=widgets.FloatSlider(min=1, max=3, step=1, description=r'$P$', value=2),
         par3=widgets.FloatSlider(min=-2, max=0, step=1, description=r'$\log\Gamma_2$', value=-1),
         kernel_shortname=widgets.RadioButtons(options=['Per', 'QP'], value='QP', description='kernel')
         );
"""
Explanation: Periodic kernels...
...can be constructed by replacing $r$ in any of the above by a periodic function of $r$. For example, the cosine kernel:
$$
k_{\cos}(x,x') = A \cos\left(\frac{2\pi r}{P}\right),
$$
[which follows the dynamics of a simple harmonic oscillator], or...
...the "exponential sine squared" kernel, obtained by mapping the 1-D variable $x$ to the 2-D variable $\mathbf{u}(x)=(\cos(x),\sin(x))$, and then applying a squared exponential in $\boldsymbol{u}$-space:
$$
k_{\sin^2 {\rm SE}}(x,x') = A \exp \left[ -\Gamma \sin^2\left(\frac{\pi r}{P}\right) \right],
$$
which allows for non-harmonic functions.
End of explanation
"""
import george

# Build a regular 2-D grid of inputs and flatten it into an (N, 2) design matrix.
x2d, y2d = np.mgrid[-3:3:0.1, -3:3:0.1]
x = x2d.ravel()
y = y2d.ravel()
N = len(x)
X = np.zeros((N, 2))
X[:, 0] = x
X[:, 1] = y

# RBF: a single shared length scale for both input dimensions.
k1 = george.kernels.ExpSquaredKernel(1.0, ndim=2)
s1 = george.GP(k1).sample(X).reshape(x2d.shape)

# ARD-style: independent length scales per axis (sum of two 1-D SE kernels).
k2 = george.kernels.ExpSquaredKernel(1.0, ndim=2, axes=1) + george.kernels.ExpSquaredKernel(0.2, ndim=2, axes=0)
s2 = george.GP(k2).sample(X).reshape(x2d.shape)

# One prior sample from each kernel, side by side.
pl.figure(figsize=(10, 5))
pl.subplot(121)
pl.contourf(x2d, y2d, s1)
pl.xlim(x.min(), x.max())
pl.ylim(y.min(), y.max())
pl.xlabel(r'$x$')
pl.ylabel(r'$y$')
pl.title('RBF')
pl.subplot(122)
pl.contourf(x2d, y2d, s2)
pl.xlim(x.min(), x.max())
pl.ylim(y.min(), y.max())
pl.xlabel(r'$x$')
pl.title('ARD');
# Function to plot samples from kernel
# NOTE(review): this redefines pltsamples3 from the earlier cell with
# different defaults, five samples without vertical offset, and different
# axis limits - in a linear run the later definition wins.
def pltsamples3(par2=0.5, par3=0.5, kernel_shortname='SE'):
    """Show a four-parameter kernel's covariance matrix and five prior samples."""
    x = np.r_[-5:5:201j]
    X = np.matrix([x]).T  # scipy.spatial.distance expects matrices
    kernel = get_kernel(kernel_shortname)
    K = kernel(X, X, [0.0, 0.0, par2, par3])
    fig = pl.figure(figsize=(10, 4))
    # Left panel: sqrt of the covariance matrix.
    ax1 = pl.subplot2grid((1, 3), (0, 0), aspect='equal')
    pl.imshow(np.sqrt(K), interpolation='nearest', vmin=0, vmax=10)
    pl.title('Covariance matrix')
    # Right panel: five zero-mean GP draws.
    ax2 = pl.subplot2grid((1, 3), (0, 1), colspan=2)
    np.random.seed(0)
    for i in range(5):
        y = np.random.multivariate_normal(np.zeros(len(x)), K)
        pl.plot(x, y)
    pl.xlim(-5, 5)
    pl.ylim(-5, 5)
    pl.xlabel('x')
    pl.ylabel('y')
    pl.title('Samples from %s prior' % kernel_shortname)
    pl.tight_layout()

# Interactive controls for period and log-decay; default kernel is 'Per'.
interact(pltsamples3,
         par2=widgets.FloatSlider(min=1, max=3, step=1, description=r'$P$', value=2),
         par3=widgets.FloatSlider(min=-2, max=0, step=1, description=r'$\log_{10}\Gamma_2$', value=-1.),
         kernel_shortname=widgets.RadioButtons(options=['Per', 'QP'], value='Per', description='kernel')
         );
"""
Explanation: Combining kernels
Any affine tranform, sum or product of valid kernels is a valid kernel.
For example, a quasi-periodic kernel can be constructed by multiplying a periodic kernel with a non-periodic one. The following is frequently used to model stellar light curves:
$$
k_{\mathrm{QP}}(x,x') = A \exp \left[ -\Gamma_1 \sin^2\left(\frac{\pi r}{P}\right) -\Gamma_2 r^2 \right].
$$
Example: Mauna Kea CO$_2$ dataset
(From Rasmussen & Williams textbook)
<img height="700" src="images/RW_mauna_kea.png">
2 or more dimensions
So far we assumed the inputs were 1-D but that doesn't have to be the case. For example, the SE kernel can be extended to D dimensions...
using a single length scale, giving the Radial Basis Function (RBF) kernel:
$$
k_{\rm RBF}(\mathbf{x},\mathbf{x'}) = A \exp \left[ - \Gamma \sum_{j=1}^{D}(x_j-x'_j)^2 \right],
$$
where $\mathbf{x}=(x_1,x_2,\ldots, x_j,\ldots,x_D)^{\mathrm{T}}$ represents a single, multi-dimensional input.
or using separate length scales for each dimension, giving the Automatic Relevance Determination (ARD) kernel:
$$
k_{\rm ARD}(\mathbf{x},\mathbf{x'}) = A \exp \left[ - \sum_{j=1}^{D} \Gamma_j (x_j-x'_j)^2 \right].
$$
End of explanation
"""
|
Weenkus/Machine-Learning-University-of-Washington | Regression/assignments/Ridge Regression Programming Assignment 1.ipynb | mit | import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
import numpy as np
from math import ceil
"""
Explanation: Initialise the libs
End of explanation
"""
# Explicit column dtypes for the King County house-sales CSV files.
dtype_dict = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':float, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int}

regressionDir = '/home/weenkus/workspace/Machine Learning - University of Washington/Regression/datasets/'
sales = pd.read_csv(regressionDir + 'kc_house_data.csv', dtype = dtype_dict)
# Sort by living area, breaking ties by price (needed for sensible line plots).
# NOTE(review): DataFrame.sort is the old pandas API (sort_values in newer
# releases) - this notebook targets an old pandas version.
sales = sales.sort(['sqft_living','price'])

# dtype_dict same as above
set_1 = pd.read_csv(regressionDir + 'wk3_kc_house_set_1_data.csv', dtype=dtype_dict)
set_2 = pd.read_csv(regressionDir + 'wk3_kc_house_set_2_data.csv', dtype=dtype_dict)
set_3 = pd.read_csv(regressionDir + 'wk3_kc_house_set_3_data.csv', dtype=dtype_dict)
set_4 = pd.read_csv(regressionDir + 'wk3_kc_house_set_4_data.csv', dtype=dtype_dict)
train_valid_shuffled = pd.read_csv(regressionDir + 'wk3_kc_house_train_valid_shuffled.csv', dtype=dtype_dict)
test = pd.read_csv(regressionDir + 'wk3_kc_house_test_data.csv', dtype=dtype_dict)
training = pd.read_csv(regressionDir + 'wk3_kc_house_train_data.csv', dtype=dtype_dict)
"""
Explanation: Load the data
End of explanation
"""
# Show plots in jupyter
%matplotlib inline

# Quick look at the raw table and the target column.
sales.head()
sales['price'].head()
"""
Explanation: Data exploration
End of explanation
"""
def polynomial_dataframe(feature, degree):  # feature is pandas.Series type
    """
    Build a DataFrame whose columns are integer powers of `feature`.

    Parameters
    ----------
    feature : pandas.Series
        Input feature values.
    degree : int
        Highest power to generate; assumed >= 1.

    Returns
    -------
    pandas.DataFrame
        Columns 'power_1' ... 'power_<degree>', where 'power_k' holds
        feature**k.
    """
    poly_dataframe = pd.DataFrame()
    # 'power_1' is the feature itself.
    poly_dataframe['power_1'] = feature
    # Vectorized Series power instead of a per-element apply(): same result,
    # clearer and much faster.
    for power in range(2, degree + 1):
        poly_dataframe['power_' + str(power)] = feature ** power
    return poly_dataframe
"""
Explanation: Helper functions
End of explanation
"""
# 15th-degree polynomial features on the full data set.
poly15_data = polynomial_dataframe(sales['sqft_living'], 15)  # use equivalent of `polynomial_sframe`
print(poly15_data)

# Nearly unregularized fit: a tiny L2 penalty.
l2_small_penalty = 1.5e-5
model = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model.fit(poly15_data, sales['price'])
model.coef_

# Fitted curve overlaid on the observed prices.
plt.plot(poly15_data, model.predict(poly15_data), poly15_data, sales['price'])
plt.show()
"""
Explanation: Ridge regression model fitting
End of explanation
"""
# With an almost-zero penalty the 15th-degree fits are free to overfit,
# so the four subset models below should differ wildly.
l2_small_penalty=1e-9

poly15_data_set1 = polynomial_dataframe(set_1['sqft_living'], 15)  # use equivalent of `polynomial_sframe`
model1 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model1.fit(poly15_data_set1, set_1['price'])

poly15_data_set2 = polynomial_dataframe(set_2['sqft_living'], 15)  # use equivalent of `polynomial_sframe`
model2 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model2.fit(poly15_data_set2, set_2['price'])

poly15_data_set3 = polynomial_dataframe(set_3['sqft_living'], 15)  # use equivalent of `polynomial_sframe`
model3 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model3.fit(poly15_data_set3, set_3['price'])

poly15_data_set4 = polynomial_dataframe(set_4['sqft_living'], 15)  # use equivalent of `polynomial_sframe`
model4 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model4.fit(poly15_data_set4, set_4['price'])

# Visualize each subset's fit against its data.
plt.plot(poly15_data_set1, model1.predict(poly15_data_set1), poly15_data_set1, set_1['price'])
plt.show()
plt.plot(poly15_data_set2, model2.predict(poly15_data_set2), poly15_data_set2, set_2['price'])
plt.show()
plt.plot(poly15_data_set3, model3.predict(poly15_data_set3), poly15_data_set3, set_3['price'])
plt.show()
plt.plot(poly15_data_set4, model4.predict(poly15_data_set4), poly15_data_set4, set_4['price'])
plt.show()

# With weak regularization the coefficients vary a lot across subsets.
print('Model 1 coefficients: ', model1.coef_)
print('Model 2 coefficients: ', model2.coef_)
print('Model 3 coefficients: ', model3.coef_)
print('Model 4 coefficients: ', model4.coef_)
"""
Explanation: Ridge regression on subsets
Using ridge regression with small l2
End of explanation
"""
# Same four subset fits as above but with a large L2 penalty, which should
# shrink the coefficients and make the four models much more similar.
l2_large_penalty=1.23e2

poly15_data_set1 = polynomial_dataframe(set_1['sqft_living'], 15)  # use equivalent of `polynomial_sframe`
model1 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True)
model1.fit(poly15_data_set1, set_1['price'])

poly15_data_set2 = polynomial_dataframe(set_2['sqft_living'], 15)  # use equivalent of `polynomial_sframe`
model2 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True)
model2.fit(poly15_data_set2, set_2['price'])

poly15_data_set3 = polynomial_dataframe(set_3['sqft_living'], 15)  # use equivalent of `polynomial_sframe`
model3 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True)
model3.fit(poly15_data_set3, set_3['price'])

poly15_data_set4 = polynomial_dataframe(set_4['sqft_living'], 15)  # use equivalent of `polynomial_sframe`
model4 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True)
model4.fit(poly15_data_set4, set_4['price'])

# Visualize each subset's (now heavily regularized) fit.
plt.plot(poly15_data_set1, model1.predict(poly15_data_set1), poly15_data_set1, set_1['price'])
plt.show()
plt.plot(poly15_data_set2, model2.predict(poly15_data_set2), poly15_data_set2, set_2['price'])
plt.show()
plt.plot(poly15_data_set3, model3.predict(poly15_data_set3), poly15_data_set3, set_3['price'])
plt.show()
plt.plot(poly15_data_set4, model4.predict(poly15_data_set4), poly15_data_set4, set_4['price'])
plt.show()

print('Model 1 coefficients: ', model1.coef_)
print('Model 2 coefficients: ', model2.coef_)
print('Model 3 coefficients: ', model3.coef_)
print('Model 4 coefficients: ', model4.coef_)
"""
Explanation: Applying a higher L2 value
End of explanation
"""
def k_fold_cross_validation(k, l2_penalty, data, output):
    """
    Estimate ridge-regression validation error via k-fold cross-validation.

    Parameters
    ----------
    k : int
        Number of folds.
    l2_penalty : float
        Ridge (L2) regularization strength.
    data : pandas.DataFrame
        Feature matrix.
    output : pandas.Series
        Target values aligned with `data`.

    Returns
    -------
    float
        Average residual sum of squares over the k validation folds.
    """
    n = len(data)
    sumRSS = 0.0
    for i in range(k):
        # Validation segment for this fold: rows [start, end).
        start = (n * i) // k
        end = (n * (i + 1)) // k
        valid_data = data[start:end]
        valid_output = output[start:end]
        # Training set is everything outside the validation segment.
        # (Previously the split was computed but discarded and the model was
        # fit and scored on the full data, so the "CV error" was really just
        # the training error, identical for every fold.)
        train_data = data[0:start].append(data[end:n])
        train_output = output[0:start].append(output[end:n])
        # Fit on the k-1 training folds only.
        model = linear_model.Ridge(alpha=l2_penalty, normalize=True)
        model.fit(train_data, train_output)
        # Accumulate the RSS on the held-out validation fold.
        RSS = ((valid_output - model.predict(valid_data)) ** 2).sum()
        sumRSS += RSS
    return sumRSS / k

# Sanity check on one subset with a tiny penalty.
print (k_fold_cross_validation(10, 1e-9, poly15_data_set2, set_2['price']))
"""
Explanation: Selecting an L2 penalty via cross-validation
Just like the polynomial degree, the L2 penalty is a "magic" parameter we need to select. We could use the validation set approach as we did in the last module, but that approach has a major disadvantage: it leaves fewer observations available for training. Cross-validation seeks to overcome this issue by using all of the training set in a smart way.
We will implement a kind of cross-validation called k-fold cross-validation. The method gets its name because it involves dividing the training set into k segments of roughtly equal size. Similar to the validation set method, we measure the validation error with one of the segments designated as the validation set. The major difference is that we repeat the process k times as follows:
Set aside segment 0 as the validation set, and fit a model on rest of data, and evalutate it on this validation set
Set aside segment 1 as the validation set, and fit a model on rest of data, and evalutate it on this validation set
...
Set aside segment k-1 as the validation set, and fit a model on rest of data, and evalutate it on this validation set
After this process, we compute the average of the k validation errors, and use it as an estimate of the generalization error. Notice that all observations are used for both training and validation, as we iterate over segments of data.
End of explanation
"""
import sys

# Grid-search the L2 penalty over 13 log-spaced values from 1e3 to 1e9,
# keeping the penalty that gives the lowest cross-validation error.
l2s = np.logspace(3, 9, num=13)
train_valid_shuffled_poly15 = polynomial_dataframe(train_valid_shuffled['sqft_living'], 15)
k = 10
minError = sys.maxsize
for l2 in l2s:
    avgError = k_fold_cross_validation(k, l2, train_valid_shuffled_poly15, train_valid_shuffled['price'])
    print ('For l2:', l2, ' the CV is ', avgError)
    if avgError < minError:
        minError = avgError
        bestl2 = l2
# Lowest CV error and the corresponding penalty.
print (minError)
print (bestl2)
"""
Explanation: Minimize the l2 by using cross validation
End of explanation
"""
# Refit on the full training set with the chosen penalty and score on test.
train_poly15 = polynomial_dataframe(training['sqft_living'], 15)
test_poly15 = polynomial_dataframe(test['sqft_living'], 15)
model = linear_model.Ridge(alpha=1000, normalize=True)
model.fit(train_poly15, training['price'])
# Report test-set RSS.
print("Residual sum of squares: %.2f"
      % ((model.predict(test_poly15) - test['price']) ** 2).sum())
"""
Explanation: Use the best l2 to train the model on all the data
End of explanation
"""
|
mne-tools/mne-tools.github.io | dev/_downloads/cfbef36033f8d33f28c4fe2cfa35314a/30_cluster_ftest_spatiotemporal.ipynb | bsd-3-clause | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD-3-Clause
import numpy as np
from scipy import stats as stats
import mne
from mne import spatial_src_adjacency
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
from mne.datasets import sample
print(__doc__)
"""
Explanation: 2 samples permutation test on source data with spatio-temporal clustering
Tests if the source space data are significantly different between
2 groups of subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
End of explanation
"""
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
stc_fname = meg_path / 'sample_audvis-meg-lh.stc'
subjects_dir = data_path / 'subjects'
src_fname = subjects_dir / 'fsaverage' / 'bem' / 'fsaverage-ico-5-src.fif'

# Load the source estimate and downsample it in time.
stc = mne.read_source_estimate(stc_fname)
stc.resample(50, npad='auto')

# Read the source space we are morphing to (fsaverage, common cortical space).
src = mne.read_source_spaces(src_fname)
fsave_vertices = [s['vertno'] for s in src]
morph = mne.compute_source_morph(stc, 'sample', 'fsaverage',
                                 spacing=fsave_vertices, smooth=20,
                                 subjects_dir=subjects_dir)
stc = morph.apply(stc)
n_vertices_fsave, n_times = stc.data.shape
tstep = stc.tstep * 1000  # convert to milliseconds

n_subjects1, n_subjects2 = 6, 7
print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))

# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
# Simulated per-subject data: noise plus the real source estimate.
X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]

# We want to compare the overall activity levels for each subject
X1 = np.abs(X1)  # only magnitude
X2 = np.abs(X2)  # only magnitude
"""
Explanation: Set parameters
End of explanation
"""
print('Computing adjacency.')
adjacency = spatial_src_adjacency(src)

# Note that X needs to be a list of multi-dimensional arrays of shape
# samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]

# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation,
# and use a very low number of permutations for the same reason.
n_permutations = 50
p_threshold = 0.001
# Two-tailed F threshold from the F distribution's percent-point function.
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        n_subjects1 - 1, n_subjects2 - 1)

print('Clustering.')
F_obs, clusters, cluster_p_values, H0 = clu =\
    spatio_temporal_cluster_test(
        X, adjacency=adjacency, n_jobs=None, n_permutations=n_permutations,
        threshold=f_threshold, buffer_size=None)

# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
"""
Explanation: Compute statistic
To use an algorithm optimized for spatio-temporal clustering, we
just pass the spatial adjacency matrix (instead of spatio-temporal)
End of explanation
"""
print('Visualizing clusters.')

# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')

# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot('fsaverage', hemi='both',
                                 views='lateral', subjects_dir=subjects_dir,
                                 time_label='temporal extent (ms)',
                                 clim=dict(kind='value', lims=[0, 1, 40]))
"""
Explanation: Visualize the clusters
End of explanation
"""
|
nicoguaro/notebooks_examples | Perturbation methods.ipynb | mit | eq = (1 + eps*u(t)**2)*diff(u(t), t, 2) + omega**2*u(t)
eq
ode_order(eq, u)
"""
Explanation: <div class="alert alert-warning">
**Note:** This notebook requires SymPy 1.5 to work.
</div>
Consider the following system
$$\ddot{u} + 4 u + \varepsilon u^2 \ddot{u} = 0 \enspace .$$
This system can be rewritten as
$$(1 + \varepsilon u^2)\ddot u + 4u = 0 \enspace ,$$
or
$$\ddot u + \frac{4u}{1 + \varepsilon u^2} = 0 \enspace .$$
As a first order system it reads
$$\begin{pmatrix}
\dot u \
\dot v
\end{pmatrix} = \begin{pmatrix}
v \
-\frac{4u}{1 + \varepsilon u^2}
\end{pmatrix} \enspace ,$$
with Jacobian matrix
$$J(u,v) = \begin{bmatrix}
0 & 1 \
-\frac{4(1 - \varepsilon u^2)}{(1+ \varepsilon u^2)^2} & 0
\end{bmatrix} \enspace .$$
This system has a fixed point in $(0,0)$, with eigenvalues
$$\lambda_1 = -2i,\quad \lambda_2 = 2i \enspace ,$$
and we can conclude that this fixed point is a center.
End of explanation
"""
u0 = Function('u0')
u1 = Function('u1')

# Substitute the two-term expansion u = u0 + eps*u1 into the ODE.
subs = [(u(t), u0(t) + eps*u1(t))]
aux = eq.subs(subs)
aux.doit().expand()

# Collect by powers of eps; coefs[-1] is the O(1) equation and coefs[-2]
# the O(eps) equation.
poly = Poly(aux.doit(), eps)
coefs = poly.coeffs()
coefs

# Zeroth-order solution: simple harmonic motion.
sol0 = dsolve(coefs[-1], u0(t)).rhs
sol0

# Forcing term u0**2 * u0'' evaluated at the zeroth-order solution.
aux = (u0(t)**2*u0(t).diff(t, 2)).subs(u0(t), sol0).doit()
aux
dsolve(u1(t).diff(t, 2) + omega**2*u1(t) + aux)

# First-order equation with the zeroth-order solution substituted in.
eq_aux = expand(coefs[-2].subs(u0(t), sol0))
eq_aux.doit()
sol1 = dsolve(eq_aux, u1(t)).rhs
sol1

C1, C2, C3, C4 = symbols("C1 C2 C3 C4")
print(sol1.subs({C1: 0, C2: 1, C3: 0, C4: -S(1)/4}))

# Two-term approximation; apply initial conditions u(0)=1, u'(0)=0.
u_app = sol0 + eps*sol1
aux_eqs = [
    sol0.subs(t, 0)-1,
    sol1.subs(t, 0),
    diff(sol0, t).subs(t, 0),
    diff(sol1, t).subs(t, 0)]
aux_eqs

# Integration constants are the free symbols of the approximation that do
# not already appear in the original equation.
coef = u_app.free_symbols - eq.free_symbols
coef
subs_sol = solve(aux_eqs, coef)
subs_sol
u_app2 = u_app.subs(subs_sol)
trigsimp(u_app2)
print(u_app2)

# Specialize to omega = 2 and simplify.
final_sol = trigsimp(u_app2).subs(omega, 2).expand()
final_sol
trigsimp(final_sol).expand()

# Reference two-term solution for comparison against final_sol.
sol = (1 - 5*eps/32)*cos(2*t) + eps/32*(6*cos(2*t) - cos(6*t) + 24*t*sin(2*t))
sol
plot((sol - final_sol).subs(eps, 1))
from scipy.integrate import odeint
def fun(x, t=0, eps=0.1):
    """Right-hand side of u'' + 4u/(1 + eps*u**2) = 0 as a first-order system.

    x is the state [position, velocity]; t is unused (odeint signature).
    """
    position, velocity = x[0], x[1]
    acceleration = -4 * position / (1 + eps * position ** 2)
    return [velocity, acceleration]
# Numerically integrate the full nonlinear system from u(0)=1, u'(0)=0.
t_vec = np.linspace(0, 100, 1000)
x = odeint(fun, [1, 0], t_vec, args=(0.1,))
# Evaluate the symbolic two-term approximation on the same grid.
lam_sol = lambdify((t, eps), final_sol, "numpy")
uu = lam_sol(t_vec, 0.1)
# Compare: numerical solution (solid) vs perturbation approximation (dashed red).
plt.figure()
plt.plot(t_vec, x[:,0])
plt.plot(t_vec, uu, '--r')
plt.show()
"""
Explanation: Straightforward expansion
Let's take $u = u_0 +\varepsilon u_1 + \cdots$. Replacing this in the equation we obtain
End of explanation
"""
|
StingraySoftware/notebooks | CrossCorrelation/cross_correlation_notebook.ipynb | mit | import numpy as np
from stingray import Lightcurve
from stingray.crosscorrelation import CrossCorrelation
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
# Render plots inline; shared larger font for axis labels throughout.
%matplotlib inline
font_prop = font_manager.FontProperties(size=16)
"""
Explanation: CrossCorrelation
This Tutorial is intended to give a demostration of How to make a CrossCorrelation Object in Stingray Library.
End of explanation
"""
dt = 0.03125  # seconds
exposure = 10.  # seconds
freq = 1  # Hz
times = np.arange(0, exposure, dt)  # seconds
# Two sinusoidal count rates with a pi/2 phase offset between them;
# Poisson sampling of rate*dt converts them to counts per bin.
signal_1 = 300 * np.sin(2.*np.pi*freq*times) + 1000  # counts/s
signal_2 = 300 * np.sin(2.*np.pi*freq*times + np.pi/2) + 1000  # counts/s
noisy_1 = np.random.poisson(signal_1*dt)  # counts
noisy_2 = np.random.poisson(signal_2*dt)  # counts
"""
Explanation: CrossCorrelation Example
1. Create two light curves
There are two ways to create a Lightcurve.<br>
1) Using an array of time stamps and an array of counts.<br>
2) From the Photon Arrival times.
In this example, Lightcurve is created using arrays of time stamps and counts.
Generate an array of relative timestamps that's 10 seconds long, with dt = 0.03125 s, and make two signals in units of counts. Each signal is a sine wave with amplitude = 300 cts/s, frequency = 1 Hz, and mean = 1000 cts/s, with a phase offset of pi/2 radians between the two. We then add Poisson noise to the light curves.
End of explanation
"""
# Wrap the noisy count arrays as stingray Lightcurve objects.
lc1 = Lightcurve(times, noisy_1)
lc2 = Lightcurve(times, noisy_2)
len(lc1)

# Overlay both light curves for a visual check of the phase offset.
fig, ax = plt.subplots(1,1,figsize=(10,6))
ax.plot(lc1.time, lc1.counts, lw=2, color='blue')
ax.plot(lc1.time, lc2.counts, lw=2, color='red')
ax.set_xlabel("Time (s)", fontproperties=font_prop)
ax.set_ylabel("Counts (cts)", fontproperties=font_prop)
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(which='major', width=1.5, length=7)
ax.tick_params(which='minor', width=1.5, length=4)
plt.show()
"""
Explanation: Now let's turn noisy_1 and noisy_2 into Lightcurve objects. This way we have two Lightcurves to calculate CrossCorrelation.
End of explanation
"""
cr = CrossCorrelation(lc1, lc2)
"""
Explanation: 2. Create a CrossCorrelation Object from two Light curves created above
To create a CrossCorrelation object from light curves, simply pass both Lightcurves created above into the CrossCorrelation constructor.
End of explanation
"""
# First few correlation values.
cr.corr[:10]
# Time Resolution for Cross Correlation is same as that of each of the Lightcurves
cr.dt
"""
Explanation: Now, Cross Correlation values are stored in attribute corr, which is called below.
End of explanation
"""
cr.plot(labels = ['Time Lags (seconds)','Correlation'])
"""
Explanation: 3. Plot Cross Correlation for Different lags
To visulaize correlation for different values of time lags, simply call plot function on cs.
End of explanation
"""
cr.time_shift #seconds
"""
Explanation: Given the Phase offset of pi/2 between two lightcurves created above, and freq=1 Hz, time_shift should be close to 0.25 sec. Small error is due to time resolution.
End of explanation
"""
cr.mode
"""
Explanation: Modes of Correlation
You can also specify an optional argument on modes of cross-correlation. <br>
There are three modes : 1) same 2) valid 3) full
Visit following ink on more details on mode : https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.signal.correlate.html
Default mode is 'same' and it gives output equal to the size of larger lightcurve and is most common in astronomy. You can see mode of your CrossCorrelation by calling mode attribute on the object.
End of explanation
"""
cr.n
"""
Explanation: The number of data points in corr and largest lightcurve are same in this mode.
End of explanation
"""
# Recompute with a full-length cross-correlation output.
cr1 = CrossCorrelation(lc1, lc2, mode = 'full')
cr1.plot()
cr1.mode
"""
Explanation: Creating CrossCorrelation with full mode now using same data as above.
End of explanation
"""
cr1.n
"""
Explanation: Full mode does a full cross-correlation.
End of explanation
"""
# Build a CrossCorrelation directly from precomputed correlation values,
# then recover the lag of maximum correlation for a 0.5 s resolution.
cs = CrossCorrelation()
cs.corr = np.array([ 660, 1790, 3026, 4019, 5164, 6647, 8105, 7023, 6012, 5162])
time_shift, time_lags, n = cs.cal_timeshift(dt=0.5)
time_shift
cs.plot( ['Time Lags (seconds)','Correlation'])
"""
Explanation: Another Example
You can also create CrossCorrelation Object by using Cross Correlation data. This can be useful in some cases when you have correlation data and want to calculate time shift for max. correlation. You need to specify time resolution for correlation(default value of 1.0 seconds is used otherwise).
End of explanation
"""
dt = 0.0001  # seconds
exposure = 50.  # seconds
freq = 1  # Hz
times = np.arange(0, exposure, dt)  # seconds
# Noise-free sinusoids with a pi/2 phase offset.
# NOTE(review): `* dt` here multiplies only the constant offset (1000*dt and
# 900*dt, i.e. 0.1 and 0.09), unlike the earlier cell where the mean was
# 1000 - confirm this is intentional.
signal_1 = 300 * np.sin(2.*np.pi*freq*times) + 1000 * dt  # counts/s
signal_2 = 200 * np.sin(2.*np.pi*freq*times + np.pi/2) + 900 * dt  # counts/s
"""
Explanation: Yet another Example with longer Lingcurve
I will be using same lightcurves as in the example above but with much longer duration and shorter lags.<br>
Both Lightcurves are chosen to be more or less same with a certain phase shift to demonstrate Correlation in a better way.
Again generating two signals, this time without Poisson noise so that the time lag can be demonstrated. For noisy light curves, accurate calculation requires interpolation.
End of explanation
"""
# Wrap the noise-free signals as Lightcurve objects and overlay them.
lc1 = Lightcurve(times, signal_1)
lc2 = Lightcurve(times, signal_2)
len(lc1)

fig, ax = plt.subplots(1,1,figsize=(10,6))
ax.plot(lc1.time, lc1.counts, lw=2, color='blue')
ax.plot(lc1.time, lc2.counts, lw=2, color='red')
ax.set_xlabel("Time (s)", fontproperties=font_prop)
ax.set_ylabel("Counts (cts)", fontproperties=font_prop)
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(which='major', width=1.5, length=7)
ax.tick_params(which='minor', width=1.5, length=4)
plt.show()
"""
Explanation: Converting noisy signals into Lightcurves.
End of explanation
"""
# Cross-correlate the noise-free light curves.
cs = CrossCorrelation(lc1, lc2)
print('Done')
cs.corr[:50]
# Time Resolution for Cross Correlation is same as that of each of the Lightcurves
cs.dt
cs.plot( ['Time Lags (seconds)','Correlation'])
# Lag of maximum correlation, in seconds.
cs.time_shift
"""
Explanation: Now, creating CrossCorrelation Object by passing lc1 and lc2 into the constructor.
End of explanation
"""
from stingray.crosscorrelation import AutoCorrelation
"""
Explanation: time_shift is very close to 0.25 sec, in this case.
AutoCorrelation
Stingray also has a separate class for AutoCorrelation. AutoCorrelation is similar to cross-correlation but involves only one light curve, i.e. the correlation of a light curve with itself.
AutoCorrelation is part of stingray.crosscorrelation module. Following line imports AutoCorrelation.
End of explanation
"""
# Autocorrelation: correlate lc1 with itself.
lc = lc1
ac = AutoCorrelation(lc)
ac.n
ac.corr[:10]
ac.time_lags
"""
Explanation: To create AutoCorrelation object, simply pass lightcurve into AutoCorrelation Constructor.<br> Using same Lighrcurve created above to demonstrate AutoCorrelation.
End of explanation
"""
# The autocorrelation peak is always at zero lag.
ac.time_shift
ac.plot()
"""
Explanation: time_Shift for AutoCorrelation is always zero. Since signals are maximally correlated at zero lag.
End of explanation
"""
dt = 0.001  # seconds
exposure = 20.  # seconds
freq = 1  # Hz
times = np.arange(0, exposure, dt)  # seconds
# Sinusoidal rate with Poisson counting noise.
signal_1 = 300 * np.sin(2.*np.pi*freq*times) + 1000  # counts/s
noisy_1 = np.random.poisson(signal_1*dt)  # counts
lc = Lightcurve(times, noisy_1)
"""
Explanation: Another Example
Another example is demonstrated using a Lightcurve with Poisson Noise.
End of explanation
"""
# AutoCorrelation accepts the same mode argument as CrossCorrelation.
ac = AutoCorrelation(lc, mode = 'full')
ac.corr
ac.time_lags
ac.time_shift
ac.plot()
"""
Explanation: AutoCorrelation also supports {full,same,valid} modes similar to CrossCorrelation
End of explanation
"""
|
mitliagkas/dshs | 21. Zipcode Visualization.ipynb | mit | import os.path
# Download and unpack the Census ZIP-code boundary archives for
# California (06) and Texas (48), skipping each if already present.
if not os.path.exists('zipdata/zt06_d00_ascii.zip'):
    !wget -P zipdata ftp://ftp.cs.brown.edu/u/spr/zipdata/zt06_d00_ascii.zip
    !unzip -d zipdata zipdata/zt06_d00_ascii.zip
if not os.path.exists('zipdata/zt48_d00_ascii.zip'):
    !wget -P zipdata ftp://ftp.cs.brown.edu/u/spr/zipdata/zt48_d00_ascii.zip
    !unzip -d zipdata zipdata/zt48_d00_ascii.zip
def read_ascii_boundary(filestem):
    '''
    Reads polygon data from an ASCII boundary file pair.

    Parameters
    ----------
    filestem : str
        Path stem; ``filestem + 'a.dat'`` holds the metadata and
        ``filestem + '.dat'`` the coordinates.

    Returns a dictionary with polygon IDs for keys. The value for each
    key is another dictionary with three keys:
    'name' - the name of the polygon (here the 5-digit ZIP code)
    'polygon' - list of (longitude, latitude) pairs defining the main
    polygon boundary
    'exclusions' - list of lists of (lon, lat) pairs for any exclusions in
    the main polygon
    '''
    metadata_file = filestem + 'a.dat'
    data_file = filestem + '.dat'

    # Read metadata: records are 6 lines long; line 0 of each record is the
    # polygon ID and line 2 the polygon name (the ZIP code).
    # Files are now opened via `with` so handles are always closed.
    with open(metadata_file) as meta:
        lines = [line.strip().strip('"') for line in meta]
    polygon_ids = lines[::6]
    polygon_names = lines[2::6]
    polygon_data = {}
    for polygon_id, polygon_name in zip(polygon_ids, polygon_names):
        # Initialize entry with name of polygon.
        polygon_data[polygon_id] = {'name': polygon_name}
    # Polygon '0' is a placeholder record, not a real ZIP code; pop() with a
    # default avoids a KeyError when it is absent.
    polygon_data.pop('0', None)

    # Read lon and lat.
    with open(data_file) as f:
        for line in f:
            fields = line.split()
            if len(fields) == 3:
                # "id lon lat" starts a new polygon.
                polygon_id = fields[0]
                polygon_data[polygon_id]['polygon'] = []
                polygon_data[polygon_id]['exclusions'] = []
            elif len(fields) == 1:
                # '-99999' denotes the start of a new sub-polygon (exclusion);
                # 'END' terminates a polygon and is ignored.
                if fields[0] == '-99999':
                    polygon_data[polygon_id]['exclusions'].append([])
            else:
                # A "lon lat" pair goes to the current exclusion if one is
                # open, otherwise to the main polygon boundary.
                lon = float(fields[0])
                lat = float(fields[1])
                if polygon_data[polygon_id]['exclusions']:
                    polygon_data[polygon_id]['exclusions'][-1].append((lon, lat))
                else:
                    polygon_data[polygon_id]['polygon'].append((lon, lat))
    return polygon_data
import csv
from pylab import *
"""
Explanation: Based on the example in
http://www.christianpeccei.com/zipmap/
ZIP area data downloaded from
ftp://ftp.cs.brown.edu/u/spr/zipdata
The mapping from states to numbers can be seen here:
https://github.com/ssoper/zip-code-boundaries/blob/master/raw.html
End of explanation
"""
import mpld3
if True:
    mpld3.enable_notebook()

# Aggregate the hospital data per patient ZIP code.
# NOTE(review): `data` is presumably a DataFrame loaded in an earlier cell
# with 'Pat_ZIP' and 'Total_Charges' columns - confirm.
reduced = data[['Pat_ZIP', 'Total_Charges']]
chargesbyzip = reduced.groupby('Pat_ZIP').mean()
countbyzip = reduced.groupby('Pat_ZIP').count()

#def makezipfigure(series, zipstem = 'zipdata/zt48_d00'):
# Choose which aggregate to map (the second assignment wins: record counts).
series = chargesbyzip
series = countbyzip
zipstem = 'zipdata/zt48_d00'
maxvalue = series.max().values[0]
valuename = series.keys()[0]

# Read in ZIP code boundaries for Texas.
d = read_ascii_boundary(zipstem)

# Create figure and two axes: one to hold the map and one to hold
# the colorbar
figure(figsize=(5, 5), dpi=100)
map_axis = axes([0.0, 0.0, 0.8, 0.9])
cb_axis = axes([0.83, 0.1, 0.03, 0.8])
#map_axis = axes([0.0, 0.0, 4.0, 4.5])
#cb_axis = axes([4.15, 0.5, 0.15, 3.0])
#map_axis = axes([0.0, 0.0, 1.6, 1.8])
#cb_axis = axes([1.66, 0.2, 0.06, 1.2])

# Define colormap to color the ZIP codes.
# You can try changing this to cm.Blues or any other colormap
# to get a different effect
cmap = cm.PuRd

# Create the map axis
axes(map_axis)
gca().set_axis_off()

# Loop over the ZIP codes in the boundary file
for polygon_id in d:
    polygon_data = array(d[polygon_id]['polygon'])
    zipcode = d[polygon_id]['name']
    try:
        value = series.xs(zipcode).values[0]
        # Define the color for the ZIP code
        fc = cmap(float(value) / maxvalue)
    # NOTE(review): bare except silently whitens any ZIP without data AND
    # hides unrelated errors - consider catching KeyError explicitly.
    except:
        fc = (1.0, 1.0, 1.0, 1.0)
    # Edge color: darkened version of the fill, half-transparent.
    edgecolor = [ square(min(fc[:3])) ]*3 + [0.5]
    # Draw the ZIP code
    patch = Polygon(array(polygon_data), facecolor=fc,
                    edgecolor=edgecolor, linewidth=.1)
    # patch = Polygon(array(polygon_data), facecolor=fc,
    #                 edgecolor=(.5, .5, .5, 1), linewidth=.2)
    gca().add_patch(patch)
gca().autoscale()
title(valuename + " per ZIP Code in Texas")

# Draw colorbar
cb = mpl.colorbar.ColorbarBase(cb_axis, cmap=cmap,
                               norm = mpl.colors.Normalize(vmin=0, vmax=maxvalue))
cb.set_label(valuename)
savefig('texas.pdf', dpi=100)

# Change all fonts to Arial
#for o in gcf().findobj(matplotlib.text.Text):
#    o.set_fontname('Arial')
"""
Explanation: From:
http://mpld3.github.io/
End of explanation
"""
|
5agado/data-science-learning | graphics/heartbeat/Heartbeat.ipynb | apache-2.0 | # Basic libraries import
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib notebook
import os
import sys
import itertools
import collections
# project specific libraries
import scipy.signal as signal
%load_ext autoreload
%autoreload 2
import heartbeat_utils
"""
Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Simulate-Heartbeat" data-toc-modified-id="Simulate-Heartbeat-1"><span class="toc-item-num">1 </span>Simulate Heartbeat</a></span></li><li><span><a href="#Tests-and-Plots" data-toc-modified-id="Tests-and-Plots-2"><span class="toc-item-num">2 </span>Tests and Plots</a></span></li></ul></div>
End of explanation
"""
# The "Daubechies" wavelet is a rough approximation to a real,
# single, heart beat ("pqrst") signal
pqrst = signal.wavelets.daub(10)
# Add the gap after the pqrst when the heart is resting.
samples_rest = 10
zero_array = np.zeros(samples_rest, dtype=float)
pqrst_full = np.concatenate([pqrst,zero_array])
# Plot one full beat (wavelet + resting gap) as the template.
plt.plot(pqrst_full)
plt.xlabel('Sample number')
plt.ylabel('Amplitude (normalised)')
plt.title('Heart beat signal Template')
plt.show()
# Simulated beats-per-minute rate
bpm = 60
# Beats per second (true division; bps is a float here).
bps = bpm / 60
# Simulated period of time in seconds that the ECG is captured in
capture_length = 10
# Calculate the number of beats in the capture time period.
# Round the number down to simplify things.
num_heart_beats = int(capture_length * bps)
# Tile the single-beat template to build the full ECG trace.
ecg_template = np.tile(pqrst_full , num_heart_beats)
# Add random (Gaussian-distributed) measurement noise.
noise = np.random.normal(0, 0.01, len(ecg_template))
ecg_template_noisy = noise + ecg_template
plt.plot(ecg_template_noisy)
plt.xlabel('Sample number')
plt.ylabel('Amplitude (normalised)')
plt.title('Heart ECG Template with Gaussian noise')
plt.show()
"""
Explanation: Simulate Heartbeat
Source
End of explanation
"""
# Target heart rate (beats per minute) at successive points in time;
# heartbeat_utils.simulate_heartbeat presumably interpolates between
# these to produce a variable-rate ECG trace -- see the module for details.
bpm_series = [10, 70, 30, 65, 120, 160, 90, 10]
hb = heartbeat_utils.simulate_heartbeat(bpm_series)
plt.plot(hb)
plt.xlabel('Sample number')
plt.ylabel('Amplitude (normalised)')
# NOTE(review): title copied from the previous cell; this trace has a
# varying BPM rather than the fixed-rate template.
plt.title('Heart ECG Template with Gaussian noise')
plt.show()
from matplotlib import animation
%matplotlib notebook
fig, ax = plt.subplots(dpi=100, figsize=(5, 4))
ax.set_xlim(0, 0.25)
ax.set_ylim(-1, 1)
line, = ax.plot([], [], lw=2)
#epoch_text = ax.text(0, 0, "Epoch 0")
def animate(i, line):
x = np.linspace(0, 2, 500)
y = hb[i:i+500]
#epoch_text.set_text("Epoch {}".format(i, cost))
line.set_data(x, y)
return line
ani = animation.FuncAnimation(fig, animate, len(hb), interval=5, repeat=True,
fargs=[line])
"""
Explanation: Tests and Plots
End of explanation
"""
|
sk-rai/Network-Analysis_made-Simple | 2. Network(X) Basics (Instructor).ipynb | mit | G = nx.read_gpickle('Synthetic Social Network.pkl') #If you are Python 2.7, read in Synthetic Social Network 27.pkl
nx.draw(G)
"""
Explanation: Nodes and Edges: How do we represent relationships between individuals using NetworkX?
As mentioned earlier, networks, also known as graphs, are composed of individual entities and the relationships between them. The technical terms for these are nodes and edges, and when we draw them we typically use circles (nodes) and lines (edges).
In this notebook, we will work with a synthetic (i.e. simulated) social network, in which nodes are individual people, and edges represent their relationships. If two nodes have an edge between them, then those two individuals know one another.
Data Representation
In the networkx implementation, graph objects store their data in dictionaries.
Nodes are part of the attribute Graph.node, which is a dictionary where the key is the node ID and the values are a dictionary of attributes.
Edges are part of the attribute Graph.edge, which is a nested dictionary. Data are accessed as such: G.edge[node1][node2]['attr_name'].
Because of the dictionary implementation of the graph, any hashable object can be a node. This means strings and tuples, but not lists and sets.
Synthetic Social Network
With this synthetic social network, we will attempt to answer the following basic questions using the NetworkX API:
How many people are present in the network?
What is the distribution of attributes of the people in this network?
How many relationships are represented in the network?
What is the distribution of the number of friends that each person has?
First off, let's load up the synthetic social network. This will show you through some of the basics of NetworkX.
For those who are interested, I simply created an Erdős–Rényi graph with n=30 and p=0.1. I used randomized functions that I wrote to generate attributes and append them to each node and edge. I then pickled the graph to disk.
End of explanation
"""
# Who are represented in the network?
G.nodes(data=True)
"""
Explanation: Basic Network Statistics
Let's first understand how many people and relationships are represented in the network.
End of explanation
"""
len(G.nodes())
# Who is connected to who in the network?
G.edges()
"""
Explanation: Exercise: Can you write a single line of code that returns the number of individuals represented?
End of explanation
"""
len(G.edges())
"""
Explanation: Exercise
Can you write a single line of code that returns the number of relationships represented?
End of explanation
"""
# Let's get a list of nodes with their attributes.
G.nodes(data=True)
# NetworkX will return a list of tuples in the form (node_id, attribute_dictionary)
"""
Explanation: Since this is a social network of people, there'll be attributes for each individual, such as age, and sex. We can grab that data off from the attributes that are stored with each node.
End of explanation
"""
from collections import Counter
# Tally how many nodes carry each value of the 'sex' attribute.
Counter([d['sex'] for n, d in G.nodes(data=True)])
"""
Explanation: Exercise
Can you count how many males and females are represented in the graph?
Hint: You may want to use the Counter object from the collections module.
End of explanation
"""
G.edges(data=True)
"""
Explanation: Edges can also store attributes in their attribute dictionary.
End of explanation
"""
# Answer
# Collect the 'date' attribute from every edge, then report the extremes of
# the range over which relationships were forged.
edge_dates = [attrs['date'] for _, _, attrs in G.edges(data=True)]
mindate, maxdate = min(edge_dates), max(edge_dates)
print(mindate, maxdate)
"""
Explanation: In this synthetic social network, I have stored the date as a datetime object. Datetime objects have attributes, namely .year, .month, .day.
Exercise
Can you figure out the range of dates during which these relationships were forged?
End of explanation
"""
# Answer
# Introduce the two individuals (31 and 32) that were left out of the
# network, then connect them to each other and to individual 7.
# NOTE(review): assumes `datetime` was imported in an earlier cell
# (e.g. `from datetime import datetime`) -- confirm before running.
G.add_node(31, age=22, sex='Male')
G.add_node(32, age=24, sex='Female')
G.add_edge(31, 32, date=datetime(2010,1,9))
G.add_edge(31, 7, date=datetime(2009,12,11))
G.add_edge(32, 7, date=datetime(2009,12,11))
# Inspect node 31's attribute dictionary (G.node is the pre-2.0 NetworkX API).
G.node[31]
"""
Explanation: Exercise
We found out that there are two individuals that we left out of the network, individual no. 31 and 32. They are one male (31) and one female (32), their ages are 22 and 24 respectively, they knew each other on 2010-01-09, and together, they both knew individual 7, on 2009-12-11. Use the functions G.add_node() and G.add_edge() to introduce this data into the network.
If you need more help, check out https://networkx.github.io/documentation/latest/tutorial/tutorial.html
End of explanation
"""
ptG = nx.DiGraph()  # ptG stands for PyCon Tutorial Graph.

# Add in nodes and edges.
# (name, nationality) pairs for each tutorial participant, kept in the
# original insertion order so the resulting graph is identical.
_participants = [
    ('Eric', 'Canada'), ('Paul', 'Canada'), ('Max', 'US'),
    ('Martin', 'Other'), ('Jim', 'US'), ('Lucas', 'US'),
    ('Thomas', 'US'), ('Brad', 'US'), ('Troy', 'Canada'),
    ('Cory', 'Canada'), ('Gokhan', 'US'), ('Riley', 'US'),
    ('Steve', 'US'), ('Ryan', 'US'), ('Andrew', 'US'),
    ('Ronan', 'Other'), ('Cody', 'Canada'), ('Jon', 'US'),
    ('Eric2', 'US'), ('William', 'US'), ('Tom', 'Other'),
    ('Chris', 'US'), ('Stu', 'US'), ('Zach', 'US'),
    ('Clint', 'Canada'), ('Aaron', 'US'), ('Vishal', 'US'),
    ('Federico', 'Other'),
]
for _name, _nationality in _participants:
    ptG.add_node(_name, nationality=_nationality)

# Directed "knows" relationships, in the original order.  Duplicate pairs
# (e.g. Martin -> Max) and self-loops are harmless: adding an existing edge
# is a no-op in a DiGraph.
_acquaintances = [
    ('Vishal', 'Aaron'), ('Vishal', 'Eric'), ('Aaron', 'Vishal'),
    ('Aaron', 'Eric'), ('Clint', 'Zach'), ('Clint', 'Eric'),
    ('Zach', 'Clint'), ('Zach', 'Riley'), ('Zach', 'Stu'),
    ('Stu', 'Zach'), ('Stu', 'Eric'), ('Stu', 'Chris'),
    ('Chris', 'Stu'), ('Chris', 'Eric'), ('Tom', 'Tom'),
    ('William', 'Jon'), ('William', 'Eric2'), ('William', 'Eric'),
    ('Eric2', 'William'), ('Eric2', 'Jon'), ('Jon', 'Eric2'),
    ('Jon', 'William'), ('Jon', 'Eric'), ('Cody', 'Eric'),
    ('Cody', 'Ronan'), ('Ronan', 'Eric'), ('Ronan', 'Cody'),
    ('Andrew', 'Eric'), ('Andrew', 'Ryan'), ('Ryan', 'Eric'),
    ('Ryan', 'Andrew'), ('Steve', 'Eric'), ('Riley', 'Zach'),
    ('Paul', 'Paul'), ('Martin', 'Max'), ('Max', 'Paul'),
    ('Martin', 'Eric'), ('Martin', 'Max'), ('Jim', 'Federico'),
    ('Lucas', 'Thomas'), ('Brad', 'Eric'), ('Thomas', 'Lucas'),
    ('Troy', 'Cory'), ('Troy', 'Eric'), ('Cory', 'Troy'),
    ('Gokhan', 'Max'),
]
ptG.add_edges_from(_acquaintances)
# We are now going to draw the network using a hive plot, grouping the nodes by the top two nationality groups, and 'others'
# for the third group.
# Each hive-plot axis holds the nodes of one nationality group.
nodes = dict()
nodes['Canada'] = [n for n, d in ptG.nodes(data=True) if d['nationality'] == 'Canada'] #list comprehension here
nodes['US'] = [n for n, d in ptG.nodes(data=True) if d['nationality'] == 'US'] #list comprehension here
nodes['Other'] = [n for n, d in ptG.nodes(data=True) if d['nationality'] == 'Other'] #list comprehension here
# All edges go into a single group, drawn in one color.
edges = dict()
edges['group1'] = [(n1, n2, d) for n1, n2, d in ptG.edges(data=True)] #list comprehension here
# Color maps: one color per node group and per edge group.
nodes_cmap = dict()
nodes_cmap['Canada'] = 'blue'
nodes_cmap['US'] = 'green'
nodes_cmap['Other'] = 'black'
edges_cmap = dict()
edges_cmap['group1'] = 'black'
from hiveplot import HivePlot
h = HivePlot(nodes, edges, nodes_cmap, edges_cmap)
h.set_minor_angle(np.pi / 12) #optional
h.draw()
"""
Explanation: Live Exercise
While we're on the matter of graph construction, let's take a look at our tutorial class. On your sheet of paper, you should have a list of names - these are people for which you knew their name prior to coming to class.
As we iterate over the class, I would like you to holler out your name, your nationality, and in a very slow fashion, the names of the people who you knew in the class.
End of explanation
"""
nx.draw(G)
"""
Explanation: Coding Patterns
These are some recommended coding patterns when doing network analysis using NetworkX, which stem from my roughly two years of experience with the package.
Iterating using List Comprehensions
I would recommend that you use the following for compactness:
[d['attr'] for n, d in G.nodes(data=True)]
And if the node is unimportant, you can do:
[d['attr'] for _, d in G.nodes(data=True)]
Iterating over Edges using List Comprehensions
A similar pattern can be used for edges:
[n2 for n1, n2, d in G.edges(data=True)]
or
[n2 for _, n2, d in G.edges(data=True)]
If the graph you are constructing is a directed graph, with a "source" and "sink" available, then I would recommend the following pattern:
[(sc, sk) for sc, sk, d in G.edges(data=True)]
or
[d['attr'] for sc, sk, d in G.edges(data=True)]
Drawing Graphs
As illustrated above, we can draw graphs using the nx.draw() function. The most popular format for drawing graphs is the node-link diagram.
End of explanation
"""
nx.draw(G, with_labels=True)
"""
Explanation: If the network is small enough to visualize, and the node labels are small enough to fit in a circle, then you can use the with_labels=True argument.
End of explanation
"""
# Visualize the graph as an adjacency matrix: a filled cell at (i, j)
# means there is an edge between nodes i and j.
matrix = nx.to_numpy_matrix(G)
# pcolor cannot take a numpy matrix, so cast it to a plain ndarray first.
plt.pcolor(np.array(matrix))
# NOTE(review): plt.axes() returned the current axes in the matplotlib of
# this notebook's era; in recent matplotlib it creates a new axes -- use
# plt.gca() there instead.
plt.axes().set_aspect('equal') # set aspect ratio equal to get a square visualization
plt.xlim(min(G.nodes()), max(G.nodes())) # set x and y limits to the number of nodes present.
plt.ylim(min(G.nodes()), max(G.nodes()))
plt.title('Adjacency Matrix')
plt.show()
"""
Explanation: However, note that if the number of nodes in the graph gets really large, node-link diagrams can begin to look like massive hairballs. This is undesirable for graph visualization.
Instead, we can use a matrix to represent them. The nodes are on the x- and y- axes, and a filled square represent an edge between the nodes. This is done by using the nx.to_numpy_matrix(G) function.
We then use matplotlib's pcolor(numpy_array) function to plot. Because pcolor cannot take in numpy matrices, we will cast the matrix as an array of arrays, and then get pcolor to plot it.
End of explanation
"""
from circos import CircosPlot
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
# Order the nodes around the circle by node ID; any ordering would work.
nodes = sorted(G.nodes())
edges = G.edges()
# Draw the Circos plot: nodes on a circle, edges as chords between them.
c = CircosPlot(nodes, edges, radius=10, ax=ax)
c.draw()
"""
Explanation: Let's try another visualization, the Circos plot. We can order the nodes in the Circos plot according to the node ID, but any other ordering is possible as well. Edges are drawn between two nodes.
Credit goes to Justin Zabilansky (MIT) for the implementation.
End of explanation
"""
# Hive plot of the social network with one axis per sex group and all
# edges drawn as a single black group.
nodes = dict()
nodes['male'] = [n for n,d in G.nodes(data=True) if d['sex'] == 'Male']
nodes['female'] = [n for n,d in G.nodes(data=True) if d['sex'] == 'Female']
edges = dict()
edges['group1'] = G.edges(data=True)
# One color per node group; a single color for the edge group.
nodes_cmap = dict()
nodes_cmap['male'] = 'blue'
nodes_cmap['female'] = 'red'
edges_cmap = dict()
edges_cmap['group1'] = 'black'
h = HivePlot(nodes, edges, nodes_cmap, edges_cmap)
h.draw()
"""
Explanation: It's pretty obvious in this visualization that there are nodes, such as node 5 and 18, that are not connected to any other node via an edge. There are other nodes, like node number 19, which is highly connected to other nodes.
Finally, let's try hive plots for the network. Two groups (male and female), and then edges drawn between them.
End of explanation
"""
|
Neuroglycerin/neukrill-net-work | notebooks/model_run_and_result_analyses/Analyse models-Copy1.ipynb | mit | m.layer_names
channel = m.monitor.channels["valid_y_nll"]
hl.Curve(zip(channel.epoch_record, channel.val_record),label="valid_y_nll")
channel = m.monitor.channels["valid_y_nll"]
plt.plot(channel.epoch_record, channel.val_record)
"""
Explanation: The train_y_nll, valid_y_nll and valid_objective show massive overfitting.
End of explanation
"""
# Plot the last 40 epochs of the validation and training negative
# log-likelihood channels from the pylearn2 monitor.
ch1 = m.monitor.channels["valid_y_nll"]
ch2 = m.monitor.channels["train_y_nll"]
hl.Curve(zip(ch1.epoch_record[-40:], ch1.val_record[-40:]),label="valid_y_nll")
hl.Curve(zip(ch2.epoch_record[-40:], ch2.val_record[-40:]),label="train_y_nll")
# Load a different (continued-training) model and re-plot its curves.
m = pylearn2.utils.serial.load(
"/disk/scratch/neuroglycerin/models/continue_hopeful_recent.pkl")
make_curves(m,"valid_objective","valid_y_nll","train_y_nll")
"""
Explanation: Hard to see whether it is still learning...
End of explanation
"""
|
digital-humanities-data-curation/hilt2015 | 3-csvkit-intro.ipynb | mit | import tarfile
import re
import os
from itertools import count
# You have a copy of this file in your `data` directory. Tate provides the data in a single TAR (tape archive) file
DATA_PATH = '../data/tate-collection-1.2.tar.gz'
DATA_FOBJ = tarfile.open(DATA_PATH)
# We can use Python's tools for working with tar files to inspect the data package
# For instance by listing the files it contains without unpacking it
FILES = DATA_FOBJ.getmembers()
for i, f in enumerate(FILES[:10]):
print('{0} \t {1}'.format(i, f))
# This time we're only going to extract the CSV file
DATA_FOBJ.extractall(path='../data/tate-collection', members=FILES[0:4])
CSV_FILE_PATH = '../data/tate-collection/collection-1.2/artist_data.csv'
"""
Explanation: Tools for CSV: csvkit
Like jq for JSON, csvkit provides a suite of tools for working with data in comma separated value (CSV) formats. Again, one of the chief virtues of the csvkit tools is the way they can be combined with other common unix tools. Also the documentation is quite good.
End of explanation
"""
%%bash
csvlook ../data/tate-collection/collection-1.2/artist_data.csv | head -n 5
"""
Explanation: Exploring
csvkit's csvlook tool gives us a way to inspect the contents of CSV files without opening them up in a program like Excel (especially when the file is so large it might crash our program)
End of explanation
"""
!csvcut -n ../data/tate-collection/collection-1.2/artist_data.csv
"""
Explanation: What we're seeing here is effectively a plain text display of our CSV data … not super pretty but faster than opening Excel. Let's make this more useful
Selecting and Slicing
We can combine this facility with the csvcut tool to see only subsets of our data. First, let's use the -n flag to print out the column headings
End of explanation
"""
!csvcut -c 1,2,5,6,9 ../data/tate-collection/collection-1.2/artist_data.csv
"""
Explanation: We can look at just a subset of columns …
End of explanation
"""
!csvcut -c name,url ../data/tate-collection/collection-1.2/artist_data.csv
"""
Explanation: Once you know the column names you can also use those to subset or slice:
End of explanation
"""
!csvcut -c name,dates ../data/tate-collection/collection-1.2/artist_data.csv | csvlook | head -n 10
"""
Explanation: Now combine these tools together using unix pipes
End of explanation
"""
!csvcut -c name,yearOfBirth,yearOfDeath,url ../data/tate-collection/collection-1.2/artist_data.csv | \
csvstat
"""
Explanation: In our case, this still doesn't look that helpful because some of these columns (like name) are really wide
Summary Statistics
The csvkit tools do offer other ways of peeking at out data that can prove useful to us — for instance through providing summary statistics.
End of explanation
"""
!csvcut -c name,yearOfBirth,yearOfDeath ../data/tate-collection/collection-1.2/artist_data.csv | \
csvgrep -c yearOfBirth -m 1497 | csvlook
!csvcut -c name,yearOfBirth,yearOfDeath ../data/tate-collection/collection-1.2/artist_data.csv | \
csvgrep -c yearOfDeath -m 2005 | csvlook
"""
Explanation: Super useful!
Searching
We can know use tools for searching (or "grepping") to find specific bits of information in our file — csvkit provides a csvgrep tool for doing just this
End of explanation
"""
!csvcut -c name,yearOfBirth,yearOfDeath ../data/tate-collection/collection-1.2/artist_data.csv | \
csvgrep -c yearOfDeath -m 2005 | csvsort -c yearOfBirth | csvlook
"""
Explanation: Let's sort by the year of birth to see which of our 2005 decedents were the oldest
End of explanation
"""
!csvsql --query "select name from artist_data where yearOfBirth > 1700;" \
../data/tate-collection/collection-1.2/artist_data.csv | csvlook
"""
Explanation: Power Usage
One of the really cool features of csvkit is that it allows us to move back and forth between CSVs and relational database structures quickly and easily — and even to treat our CSVs (for the purpose of rapid exploration) as though they were databases we could run SQL queries against
End of explanation
"""
!csvsql -i sqlite ../data/tate-collection/collection-1.2/artist_data.csv
!csvsql --db sqlite:///tate_artists.db --insert ../data/tate-collection/collection-1.2/artist_data.csv
!sql2csv --db sqlite:///tate_artists.db --query "select * from artist_data"
"""
Explanation: This facility is best for one-off ad hoc queries and, since csvkit is building a little in-memory database behind the scenes to make it possible, large datasets can be very slow to work with this way.
However, csvkit SQL tools also make it really easy to turn CSVs into a proper database — by creating the necessary SQL statements for you and trying to guess the correct data types for columns. csvsql will even load the data for you if you have an SQL database installed.
End of explanation
"""
!sql2csv --db sqlite:///tate_artists.db --query "select * from artist_data where gender='Female'"
!sql2csv --db sqlite:///tate_artists.db --query "select name,dates from artist_data where gender='Female'" | \
csvlook
"""
Explanation: Imported successfully, now we can run any SQL query we want
End of explanation
"""
|
johnnyliu27/openmc | examples/jupyter/mg-mode-part-i.ipynb | mit | import os
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import openmc
%matplotlib inline
"""
Explanation: This Notebook illustrates the usage of OpenMC's multi-group calculational mode with the Python API. This example notebook creates and executes the 2-D C5G7 benchmark model using the openmc.MGXSLibrary class to create the supporting data library on the fly.
Generate MGXS Library
End of explanation
"""
# Create a 7-group structure with arbitrary boundaries (the specific boundaries are unimportant)
groups = openmc.mgxs.EnergyGroups(np.logspace(-5, 7, 8))
# Groupwise data for UO2 (values from Appendix A of the C5G7 benchmark),
# isotropic (P0) scattering. Cross sections are entered in order of
# increasing group index (decreasing energy).
uo2_xsdata = openmc.XSdata('uo2', groups)
uo2_xsdata.order = 0
# When setting the data let the object know you are setting the data for a temperature of 294K.
# NOTE: C5G7 uses transport-corrected data, so "total" here is really the
# transport cross section.
uo2_xsdata.set_total([1.77949E-1, 3.29805E-1, 4.80388E-1, 5.54367E-1,
                      3.11801E-1, 3.95168E-1, 5.64406E-1], temperature=294.)
uo2_xsdata.set_absorption([8.0248E-03, 3.7174E-3, 2.6769E-2, 9.6236E-2,
                           3.0020E-02, 1.1126E-1, 2.8278E-1], temperature=294.)
uo2_xsdata.set_fission([7.21206E-3, 8.19301E-4, 6.45320E-3, 1.85648E-2,
                        1.78084E-2, 8.30348E-2, 2.16004E-1], temperature=294.)
uo2_xsdata.set_nu_fission([2.005998E-2, 2.027303E-3, 1.570599E-2, 4.518301E-2,
                           4.334208E-2, 2.020901E-1, 5.257105E-1], temperature=294.)
# Fission spectrum: essentially all fission neutrons are born in the
# fastest groups.
uo2_xsdata.set_chi([5.87910E-1, 4.11760E-1, 3.39060E-4, 1.17610E-7,
                    0.00000E-0, 0.00000E-0, 0.00000E-0], temperature=294.)
"""
Explanation: We will now create the multi-group library using data directly from Appendix A of the C5G7 benchmark documentation. All of the data below will be created at 294K, consistent with the benchmark.
This notebook will first begin by setting the group structure and building the groupwise data for UO2. As you can see, the cross sections are input in the order of increasing groups (or decreasing energy).
Note: The C5G7 benchmark uses transport-corrected cross sections. So the total cross section we input here will technically be the transport cross section.
End of explanation
"""
# The scattering matrix is ordered with incoming groups as rows and outgoing groups as columns
# (i.e., below the diagonal is up-scattering).
# The outer singleton dimension is the scattering order (P0 only here).
scatter_matrix = \
    [[[1.27537E-1, 4.23780E-2, 9.43740E-6, 5.51630E-9, 0.00000E-0, 0.00000E-0, 0.00000E-0],
      [0.00000E-0, 3.24456E-1, 1.63140E-3, 3.14270E-9, 0.00000E-0, 0.00000E-0, 0.00000E-0],
      [0.00000E-0, 0.00000E-0, 4.50940E-1, 2.67920E-3, 0.00000E-0, 0.00000E-0, 0.00000E-0],
      [0.00000E-0, 0.00000E-0, 0.00000E-0, 4.52565E-1, 5.56640E-3, 0.00000E-0, 0.00000E-0],
      [0.00000E-0, 0.00000E-0, 0.00000E-0, 1.25250E-4, 2.71401E-1, 1.02550E-2, 1.00210E-8],
      [0.00000E-0, 0.00000E-0, 0.00000E-0, 0.00000E-0, 1.29680E-3, 2.65802E-1, 1.68090E-2],
      [0.00000E-0, 0.00000E-0, 0.00000E-0, 0.00000E-0, 0.00000E-0, 8.54580E-3, 2.73080E-1]]]
scatter_matrix = np.array(scatter_matrix)
# OpenMC expects [incoming group, outgoing group, scattering order], so move
# the leading order axis to the last position.
scatter_matrix = np.rollaxis(scatter_matrix, 0, 3)
uo2_xsdata.set_scatter_matrix(scatter_matrix, temperature=294.)
"""
Explanation: We will now add the scattering matrix data.
Note: Most users familiar with deterministic transport libraries are already familiar with the idea of entering one scattering matrix for every order (i.e. scattering order as the outer dimension). However, the shape of OpenMC's scattering matrix entry is instead [Incoming groups, Outgoing Groups, Scattering Order] to best enable other scattering representations. We will follow the more familiar approach in this notebook, and then use numpy's numpy.rollaxis function to change the ordering to what we need (scattering order on the inner dimension).
End of explanation
"""
# Assemble a multi-group cross-section library containing the UO2 data set
# built above, then write it to disk as 'mgxs.h5'.
mgxs_lib = openmc.MGXSLibrary(groups)
mgxs_lib.add_xsdata(uo2_xsdata)
mgxs_lib.export_to_hdf5('mgxs.h5')
"""
Explanation: Now that the UO2 data has been created, we can move on to the remaining materials using the same process.
However, we will actually skip repeating the above for now. Our simulation will instead use the c5g7.h5 file that has already been created using exactly the same logic as above, but for the remaining materials in the benchmark problem.
For now we will show how you would use the uo2_xsdata information to create an openmc.MGXSLibrary object and write to disk.
End of explanation
"""
# Build one material per cross-section data set in the library. Each material
# simply references the matching macroscopic data; the density acts as a pure
# scale factor and is left at 1.0.
materials = {}
for xs_name in ['uo2', 'mox43', 'mox7', 'mox87', 'fiss_chamber', 'guide_tube', 'water']:
    mat = openmc.Material(name=xs_name)
    mat.set_density('macro', 1.)
    mat.add_macroscopic(xs_name)
    materials[xs_name] = mat
"""
Explanation: Generate 2-D C5G7 Problem Input Files
To build the actual 2-D model, we will first begin by creating the materials.xml file.
First we need to define materials that will be used in the problem. In other notebooks, either nuclides or elements were added to materials at the equivalent stage. We can do that in multi-group mode as well. However, multi-group cross-sections are sometimes provided as macroscopic cross-sections; the C5G7 benchmark data are macroscopic. In this case, we can instead use the Material.add_macroscopic method to specific a macroscopic object. Unlike for nuclides and elements, we do not need provide information on atom/weight percents as no number densities are needed.
When assigning macroscopic objects to a material, the density can still be scaled by setting the density to a value that is not 1.0. This would be useful, for example, when slightly perturbing the density of water due to a small change in temperature (while of course ignoring any resultant spectral shift). The density of a macroscopic dataset is set to 1.0 in the openmc.Material object by default when a macroscopic dataset is used; so we will show its use the first time and then afterwards it will not be required.
Aside from these differences, the following code is very similar to similar code in other OpenMC example Notebooks.
End of explanation
"""
# Instantiate a Materials collection, register all Materials, and export to XML
materials_file = openmc.Materials(materials.values())
# Set the location of the cross sections file to our pre-written set
# (c5g7.h5 holds the full benchmark library, built with the same process
# demonstrated above for UO2).
materials_file.cross_sections = 'c5g7.h5'
materials_file.export_to_xml()
"""
Explanation: Now we can go ahead and produce a materials.xml file for use by OpenMC
End of explanation
"""
# Single cylindrical surface shared by all pins; it separates the pin
# interior from the surrounding moderator.
pin_surf = openmc.ZCylinder(x0=0, y0=0, R=0.54, name='pin_surf')

# Build one two-cell pin universe per material: the material fills the
# inside of the cylinder and water fills the outside.
cells = {}
universes = {}
for mat in materials.values():
    inner = openmc.Cell(name=mat.name)
    inner.region = -pin_surf
    inner.fill = mat

    moderator_name = mat.name + '_moderator'
    outer = openmc.Cell(name=moderator_name)
    outer.region = +pin_surf
    outer.fill = materials['water']

    cells[mat.name] = inner
    cells[moderator_name] = outer

    pin = openmc.Universe(name=mat.name)
    pin.add_cells([inner, outer])
    universes[mat.name] = pin
"""
Explanation: Our next step will be to create the geometry information needed for our assembly and to write that to the geometry.xml file.
We will begin by defining the surfaces, cells, and universes needed for each of the individual fuel pins, guide tubes, and fission chambers.
End of explanation
"""
lattices = {}
# Instantiate the UO2 Lattice
# 17x17 pin lattice with a 1.26 cm pin pitch (21.42 cm assembly).
lattices['UO2 Assembly'] = openmc.RectLattice(name='UO2 Assembly')
lattices['UO2 Assembly'].dimension = [17, 17]
lattices['UO2 Assembly'].lower_left = [-10.71, -10.71]
lattices['UO2 Assembly'].pitch = [1.26, 1.26]
# Short aliases for the pin universes: u = fuel, g = guide tube,
# f = fission chamber (center position).
u = universes['uo2']
g = universes['guide_tube']
f = universes['fiss_chamber']
lattices['UO2 Assembly'].universes = \
    [[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
     [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
     [u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
     [u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
     [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
     [u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
     [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
     [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
     [u, u, g, u, u, g, u, u, f, u, u, g, u, u, g, u, u],
     [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
     [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
     [u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
     [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
     [u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
     [u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
     [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
     [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u]]
# Create a containing cell and universe
cells['UO2 Assembly'] = openmc.Cell(name='UO2 Assembly')
cells['UO2 Assembly'].fill = lattices['UO2 Assembly']
universes['UO2 Assembly'] = openmc.Universe(name='UO2 Assembly')
universes['UO2 Assembly'].add_cell(cells['UO2 Assembly'])
# Instantiate the MOX Lattice
# Same 17x17 layout as the UO2 assembly, but with three MOX enrichment zones:
# m = 4.3% (outer ring), n = 7.0% (intermediate), o = 8.7% (interior).
lattices['MOX Assembly'] = openmc.RectLattice(name='MOX Assembly')
lattices['MOX Assembly'].dimension = [17, 17]
lattices['MOX Assembly'].lower_left = [-10.71, -10.71]
lattices['MOX Assembly'].pitch = [1.26, 1.26]
m = universes['mox43']
n = universes['mox7']
o = universes['mox87']
g = universes['guide_tube']
f = universes['fiss_chamber']
lattices['MOX Assembly'].universes = \
    [[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m],
     [m, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, m],
     [m, n, n, n, n, g, n, n, g, n, n, g, n, n, n, n, m],
     [m, n, n, g, n, o, o, o, o, o, o, o, n, g, n, n, m],
     [m, n, n, n, o, o, o, o, o, o, o, o, o, n, n, n, m],
     [m, n, g, o, o, g, o, o, g, o, o, g, o, o, g, n, m],
     [m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
     [m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
     [m, n, g, o, o, g, o, o, f, o, o, g, o, o, g, n, m],
     [m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
     [m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
     [m, n, g, o, o, g, o, o, g, o, o, g, o, o, g, n, m],
     [m, n, n, n, o, o, o, o, o, o, o, o, o, n, n, n, m],
     [m, n, n, g, n, o, o, o, o, o, o, o, n, g, n, n, m],
     [m, n, n, n, n, g, n, n, g, n, n, g, n, n, n, n, m],
     [m, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, m],
     [m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m]]
# Create a containing cell and universe
cells['MOX Assembly'] = openmc.Cell(name='MOX Assembly')
cells['MOX Assembly'].fill = lattices['MOX Assembly']
universes['MOX Assembly'] = openmc.Universe(name='MOX Assembly')
universes['MOX Assembly'].add_cell(cells['MOX Assembly'])
# Instantiate the reflector Lattice
# A single assembly-sized water cell (1x1 lattice with the full 21.42 cm pitch).
lattices['Reflector Assembly'] = openmc.RectLattice(name='Reflector Assembly')
lattices['Reflector Assembly'].dimension = [1,1]
lattices['Reflector Assembly'].lower_left = [-10.71, -10.71]
lattices['Reflector Assembly'].pitch = [21.42, 21.42]
lattices['Reflector Assembly'].universes = [[universes['water']]]
# Create a containing cell and universe
cells['Reflector Assembly'] = openmc.Cell(name='Reflector Assembly')
cells['Reflector Assembly'].fill = lattices['Reflector Assembly']
universes['Reflector Assembly'] = openmc.Universe(name='Reflector Assembly')
universes['Reflector Assembly'].add_cell(cells['Reflector Assembly'])
"""
Explanation: The next step is to take our universes (representing the different pin types) and lay them out in a lattice to represent the assembly types
End of explanation
"""
# 3x3 core map: fueled 2x2 checkerboard (UO2/MOX) with reflector assemblies
# filling the remaining row and column.
lattices['Core'] = openmc.RectLattice(name='3x3 core lattice')
lattices['Core'].dimension= [3, 3]
lattices['Core'].lower_left = [-32.13, -32.13]
lattices['Core'].pitch = [21.42, 21.42]
r = universes['Reflector Assembly']
u = universes['UO2 Assembly']
m = universes['MOX Assembly']
lattices['Core'].universes = [[u, m, r],
                              [m, u, r],
                              [r, r, r]]
# Create boundary planes to surround the geometry
# (reflective on the symmetry sides, vacuum on the reflector sides).
min_x = openmc.XPlane(x0=-32.13, boundary_type='reflective')
max_x = openmc.XPlane(x0=+32.13, boundary_type='vacuum')
min_y = openmc.YPlane(y0=-32.13, boundary_type='vacuum')
max_y = openmc.YPlane(y0=+32.13, boundary_type='reflective')
# Create root Cell
root_cell = openmc.Cell(name='root cell')
root_cell.fill = lattices['Core']
# Add boundary planes
root_cell.region = +min_x & -max_x & +min_y & -max_y
# Create root Universe
root_universe = openmc.Universe(name='root universe', universe_id=0)
root_universe.add_cell(root_cell)
"""
Explanation: Let's now create the core layout in a 3x3 lattice where each lattice position is one of the assemblies we just defined.
After that we can create the final cell to contain the entire core.
End of explanation
"""
# Visual sanity check: render the full 3x3-assembly core colored by material.
root_universe.plot(origin=(0., 0., 0.), width=(3 * 21.42, 3 * 21.42), pixels=(500, 500),
                   color_by='material')
"""
Explanation: Before we commit to the geometry, we should view it using the Python API's plotting capability
End of explanation
"""
# Create Geometry and set root Universe
geometry = openmc.Geometry(root_universe)
# Export to "geometry.xml"
geometry.export_to_xml()
"""
Explanation: OK, it looks pretty good, let's go ahead and write the file
End of explanation
"""
tallies_file = openmc.Tallies()
# Instantiate a tally Mesh
# 34x34 cells over the 2x2 fueled quadrant: 42.84 cm / 34 = 1.26 cm,
# i.e. one mesh cell per pin.
mesh = openmc.Mesh()
mesh.type = 'regular'
mesh.dimension = [17 * 2, 17 * 2]
mesh.lower_left = [-32.13, -10.71]
mesh.upper_right = [+10.71, +32.13]
# Instantiate tally Filter
mesh_filter = openmc.MeshFilter(mesh)
# Instantiate the Tally
tally = openmc.Tally(name='mesh tally')
tally.filters = [mesh_filter]
tally.scores = ['fission']
# Add tally to collection
tallies_file.append(tally)
# Export all tallies to a "tallies.xml" file
tallies_file.export_to_xml()
"""
Explanation: We can now create the tally file information. The tallies will be set up to give us the pin powers in this notebook. We will do this with a mesh filter, with one mesh cell per pin.
End of explanation
"""
# OpenMC simulation parameters
# (150 - 50) active batches x 5000 particles = 500,000 active histories.
batches = 150
inactive = 50
particles = 5000
# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
# Tell OpenMC this is a multi-group problem
settings_file.energy_mode = 'multi-group'
# Set the verbosity to 6 so we don't see output for every batch
settings_file.verbosity = 6
# Create an initial uniform spatial source distribution over fissionable zones
# (the box covers only the fueled 2x2 quadrant of the core)
bounds = [-32.13, -10.71, -1e50, 10.71, 32.13, 1e50]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
# Tell OpenMC we want to run in eigenvalue mode
settings_file.run_mode = 'eigenvalue'
# Export to "settings.xml"
settings_file.export_to_xml()
"""
Explanation: With the geometry and materials finished, we now just need to define simulation parameters for the settings.xml file. Note the use of the energy_mode attribute of our settings_file object. This is used to tell OpenMC that we intend to run in multi-group mode instead of the default continuous-energy mode. If we didn't specify this but our cross sections file was not a continuous-energy data set, then OpenMC would complain.
This will be a relatively coarse calculation with only 500,000 active histories. A benchmark-fidelity run would of course require many more!
End of explanation
"""
# Run OpenMC using the materials/geometry/tallies/settings XML exported above.
openmc.run()
"""
Explanation: Let's go ahead and execute the simulation! You'll notice that the output for multi-group mode is exactly the same as for continuous-energy. The differences are all under the hood.
End of explanation
"""
# Load the last statepoint file and keff value
sp = openmc.StatePoint('statepoint.' + str(batches) + '.h5')
# Get the OpenMC pin power tally data
mesh_tally = sp.get_tally(name='mesh tally')
fission_rates = mesh_tally.get_values(scores=['fission'])
# Reshape array to 2D for plotting
fission_rates.shape = mesh.dimension
# Normalize to the average pin power (only nonzero cells count in the mean)
fission_rates /= np.mean(fission_rates[fission_rates > 0.])
# Force zeros to be NaNs so their values are not included when matplotlib calculates
# the color scale
fission_rates[fission_rates == 0.] = np.nan
# Plot the pin powers and the fluxes
plt.figure()
plt.imshow(fission_rates, interpolation='none', cmap='jet', origin='lower')
plt.colorbar()
plt.title('Pin Powers')
plt.show()
"""
Explanation: Results Visualization
Now that we have run the simulation, let's look at the fission rate and flux tallies that we tallied.
End of explanation
"""
|
metpy/MetPy | v0.4/_downloads/Natural_Neighbor_Verification.ipynb | bsd-3-clause | import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import ConvexHull, Delaunay, delaunay_plot_2d, Voronoi, voronoi_plot_2d
from scipy.spatial.distance import euclidean
from metpy.gridding import polygons, triangles
from metpy.gridding.interpolation import nn_point
plt.rcParams['figure.figsize'] = (15, 10)
"""
Explanation: Natural Neighbor Verification
Walks through the steps of Natural Neighbor interpolation to validate that the algorithmic
approach taken in MetPy is correct.
Find natural neighbors visual test
A triangle is a natural neighbor for a point if the
circumscribed circle <https://en.wikipedia.org/wiki/Circumscribed_circle>_ of the
triangle contains that point. It is important that we correctly grab the correct triangles
for each point before proceeding with the interpolation.
Algorithmically:
We place all of the grid points in a KDTree. These provide worst-case O(n) time
complexity for spatial searches.
We generate a Delaunay Triangulation <https://docs.scipy.org/doc/scipy/
reference/tutorial/spatial.html#delaunay-triangulations>_
using the locations of the provided observations.
For each triangle, we calculate its circumcenter and circumradius. Using
KDTree, we then assign each grid a triangle that has a circumcenter within a
circumradius of the grid's location.
The resulting dictionary uses the grid index as a key and a set of natural
neighbor triangles in the form of triangle codes from the Delaunay triangulation.
This dictionary is then iterated through to calculate interpolation values.
We then traverse the ordered natural neighbor edge vertices for a particular
grid cell in groups of 3 (n - 1, n, n + 1), and perform calculations to generate
proportional polygon areas.
Circumcenter of (n - 1), n, grid_location
Circumcenter of (n + 1), n, grid_location
Determine what existing circumcenters (ie, Delaunay circumcenters) are associated
with vertex n, and add those as polygon vertices. Calculate the area of this polygon.
Increment the current edges to be checked, i.e.:
n - 1 = n, n = n + 1, n + 1 = n + 2
Repeat steps 5 & 6 until all of the edge combinations of 3 have been visited.
Repeat steps 4 through 7 for each grid cell.
End of explanation
"""
# Fixed seed so the 10 random observation points are reproducible.
np.random.seed(100)
pts = np.random.randint(0, 100, (10, 2))
xp = pts[:, 0]
yp = pts[:, 1]
# Observation value at each point: x coordinate squared, divided by 1000.
zp = (pts[:, 0] * pts[:, 0]) / 1000
tri = Delaunay(pts)
delaunay_plot_2d(tri)
# Annotate each observation with its value.
for i, zval in enumerate(zp):
    plt.annotate('{} F'.format(zval), xy=(pts[i, 0] + 2, pts[i, 1]))
# Two test grid locations at which to interpolate: (30, 30) and (60, 60).
sim_gridx = [30., 60.]
sim_gridy = [30., 60.]
plt.plot(sim_gridx, sim_gridy, '+', markersize=10)
plt.axes().set_aspect('equal', 'datalim')
plt.title('Triangulation of observations and test grid cell '
          'natural neighbor interpolation values')
# Natural neighbor triangles (members) and circumcircle data (tri_info)
# for each grid point, then the interpolated value at each grid point.
members, tri_info = triangles.find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy)))
val = nn_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri, members[0], tri_info)
plt.annotate('grid 0: {:.3f}'.format(val), xy=(sim_gridx[0] + 2, sim_gridy[0]))
val = nn_point(xp, yp, zp, (sim_gridx[1], sim_gridy[1]), tri, members[1], tri_info)
plt.annotate('grid 1: {:.3f}'.format(val), xy=(sim_gridx[1] + 2, sim_gridy[1]))
"""
Explanation: For a test case, we generate 10 random points and observations, where the
observation values are just the square of the x coordinate
value divided by 1000 (matching zp = x**2 / 1000 in the code).
We then create two test points (grid 0 & grid 1) at which we want to
estimate a value using natural neighbor interpolation.
The locations of these observations are then used to generate a Delaunay triangulation.
End of explanation
"""
def draw_circle(x, y, r, m, label):
    """Plot a circle of radius r centered at (x, y) with line format m."""
    theta = np.deg2rad(np.arange(360))
    plt.plot(x + r * np.cos(theta), y + r * np.sin(theta), m, label=label)
# Recompute natural neighbors (same call as in the previous cell) so this
# cell can be run on its own.
members, tri_info = triangles.find_natural_neighbors(tri, list(zip(sim_gridx, sim_gridy)))
delaunay_plot_2d(tri)
plt.plot(sim_gridx, sim_gridy, 'ks', markersize=10)
# Draw every triangle's circumcircle, color-coded by which test grid
# point(s) it is a natural neighbor of.
# NOTE(review): the legend text 'grid 1 & 2' appears to mean both test
# grids, whose indices are 0 and 1 — confirm the intended wording.
for i, info in tri_info.items():
    x_t = info['cc'][0]
    y_t = info['cc'][1]
    if i in members[1] and i in members[0]:
        draw_circle(x_t, y_t, info['r'], 'm-', str(i) + ': grid 1 & 2')
        plt.annotate(str(i), xy=(x_t, y_t), fontsize=15)
    elif i in members[0]:
        draw_circle(x_t, y_t, info['r'], 'r-', str(i) + ': grid 0')
        plt.annotate(str(i), xy=(x_t, y_t), fontsize=15)
    elif i in members[1]:
        draw_circle(x_t, y_t, info['r'], 'b-', str(i) + ': grid 1')
        plt.annotate(str(i), xy=(x_t, y_t), fontsize=15)
    else:
        draw_circle(x_t, y_t, info['r'], 'k:', str(i) + ': no match')
        plt.annotate(str(i), xy=(x_t, y_t), fontsize=9)
plt.axes().set_aspect('equal', 'datalim')
plt.legend()
"""
Explanation: Using the circumcenter and circumcircle radius information from
:func:metpy.gridding.triangles.find_natural_neighbors, we can visually
examine the results to see if they are correct.
End of explanation
"""
# Compare the grid 0 -> triangle 8 circumcenter distance against the
# circumradius: the point is inside the circle only if distance < radius.
x_t, y_t = tri_info[8]['cc']
r = tri_info[8]['r']
print('Distance between grid0 and Triangle 8 circumcenter:',
      euclidean([x_t, y_t], [sim_gridx[0], sim_gridy[0]]))
print('Triangle 8 circumradius:', r)
"""
Explanation: What?....the circle from triangle 8 looks pretty darn close. Why isn't
grid 0 included in that circle?
End of explanation
"""
# Circumcenters and circumradii of grid 0's natural neighbor triangles.
cc = np.array([tri_info[m]['cc'] for m in members[0]])
r = np.array([tri_info[m]['r'] for m in members[0]])
print('circumcenters:\n', cc)
print('radii\n', r)
"""
Explanation: Lets do a manual check of the above interpolation value for grid 0 (southernmost grid)
Grab the circumcenters and radii for natural neighbors
End of explanation
"""
# Voronoi diagram of the observations, used here to visualize areal ratios.
vor = Voronoi(list(zip(xp, yp)))
voronoi_plot_2d(vor)
# Indices of grid 0's natural neighbor observations.
nn_ind = np.array([0, 5, 7, 8])
z_0 = zp[nn_ind]
x_0 = xp[nn_ind]
y_0 = yp[nn_ind]
# Annotate each natural neighbor observation with its location and value.
for x, y, z in zip(x_0, y_0, z_0):
    plt.annotate('{}, {}: {:.3f} F'.format(x, y, z), xy=(x, y))
# Mark grid 0 itself.
plt.plot(sim_gridx[0], sim_gridy[0], 'k+', markersize=10)
plt.annotate('{}, {}'.format(sim_gridx[0], sim_gridy[0]), xy=(sim_gridx[0] + 2, sim_gridy[0]))
# The natural neighbor circumcenters coincide with Voronoi vertices.
plt.plot(cc[:, 0], cc[:, 1], 'ks', markersize=15, fillstyle='none',
         label='natural neighbor\ncircumcenters')
for center in cc:
    plt.annotate('{:.3f}, {:.3f}'.format(center[0], center[1]),
                 xy=(center[0] + 1, center[1] + 1))
# Outline grid 0's natural neighbor triangles on top of the diagram.
tris = tri.points[tri.simplices[members[0]]]
for triangle in tris:
    x = [triangle[0, 0], triangle[1, 0], triangle[2, 0], triangle[0, 0]]
    y = [triangle[0, 1], triangle[1, 1], triangle[2, 1], triangle[0, 1]]
    plt.plot(x, y, ':', linewidth=2)
plt.legend()
plt.axes().set_aspect('equal', 'datalim')
def draw_polygon_with_info(polygon, off_x=0, off_y=0):
    """Draw one of the natural neighbor polygons with some information."""
    # Order the vertices along the convex hull so edges can be walked in turn.
    verts = np.array(polygon)[ConvexHull(polygon).vertices]
    n = len(verts)
    for idx, pt in enumerate(verts):
        nxt = verts[(idx + 1) % n]
        plt.plot([pt[0], nxt[0]], [pt[1], nxt[1]], 'k-')
    # Annotate the polygon's area near its centroid, shifted by the offsets.
    avex, avey = np.mean(verts, axis=0)
    plt.annotate('area: {:.3f}'.format(polygons.area(verts)),
                 xy=(avex + off_x, avey + off_y), fontsize=12)
# Circumcenters of the triangles formed by pairing adjacent natural neighbor
# observations with grid 0 at (30, 30). Each is needed by two polygons, so
# compute each one once.
cc_a = triangles.circumcenter((53, 66), (15, 60), (30, 30))
cc_b = triangles.circumcenter((34, 24), (53, 66), (30, 30))
cc_c = triangles.circumcenter((15, 60), (8, 24), (30, 30))
cc_d = triangles.circumcenter((8, 24), (34, 24), (30, 30))
# Draw the four proportional-area polygons around grid 0.
draw_polygon_with_info([cc[0], cc_a, cc_b])
draw_polygon_with_info([cc[0], cc[1], cc_a, cc_c], off_x=-9, off_y=3)
draw_polygon_with_info([cc[1], cc_d, cc_c], off_x=-15)
draw_polygon_with_info([cc[0], cc[1], cc_d, cc_b])
"""
Explanation: Draw the natural neighbor triangles and their circumcenters. Also plot a Voronoi diagram
<https://docs.scipy.org/doc/scipy/reference/tutorial/spatial.html#voronoi-diagrams>_
which serves as a complementary (but not necessary)
spatial data structure that we use here simply to show areal ratios.
Notice that the two natural neighbor triangle circumcenters are also vertices
in the Voronoi plot (green dots), and the observations are in the the polygons (blue dots).
End of explanation
"""
# Polygon areas and the associated observation values, transcribed from the
# annotated plot above (one entry per natural neighbor polygon).
areas = np.array([60.434, 448.296, 25.916, 70.647])
values = np.array([0.064, 1.156, 2.809, 0.225])
total_area = np.sum(areas)
print(total_area)
"""
Explanation: Put all of the generated polygon areas and their affiliated values in arrays.
Calculate the total area of all of the generated polygons.
End of explanation
"""
# Fraction of the total area contributed by each polygon.
proportions = areas / total_area
print(proportions)
"""
Explanation: For each polygon area, calculate its percent of total area.
End of explanation
"""
# Area-weighted contribution of each observation value.
contributions = proportions * values
print(contributions)
"""
Explanation: Multiply the percent of total area by the respective values.
End of explanation
"""
# The manual area-weighted sum should match the library's nn_point result.
interpolation_value = np.sum(contributions)
function_output = nn_point(xp, yp, zp, (sim_gridx[0], sim_gridy[0]), tri, members[0], tri_info)
print(interpolation_value, function_output)
"""
Explanation: The sum of this array is the interpolation value!
End of explanation
"""
|
riddhishb/ipython-notebooks | Adaboost/Adaboost_Final note.ipynb | gpl-3.0 | %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import time
start_time = time.time()
"""
Explanation: This is a Jupyter notebook.
Lectures about Python, useful both for beginners and experts, can be found at http://scipy-lectures.github.io.
Open the notebook by (1) copying this file into a directory, (2) in that directory typing
jupyter-notebook
and (3) selecting the notebook.
Written By: Shashwat Shukla
In this exercise, we will learn and code about Adaboost algorithm and look at one of it's application.
Adaboost
Motivation
Before we dive into the details of the Adaboost algorithm, a brief overview of what Adaboost is and what it has to do with Machine Learning will prove useful to the uninitiated.
First of all, what are we trying to achieve here?
Basically, we are given a dataset. For each datapoint, we have measured and stored the value of some parameters of interest.
We have also assigned each datapoint a label. That is, the dataset that we have is already classified. It could have been classified by a human or the label could be assigned on the basis of experimental/empirical observation.
It's best explained by an example. So let's say we conducted a census in India. We choose our dataset to be a subset of all the census data that we have collected. We manually label all the data in our subset. Let's say that our subset contained a million people. Given that there are a billion people in India, we don't want to do the rest of the labelling by hand! This is where Adaboost comes in. We will use the data we have already labelled as training data and feed it into the Adaboost algorithm. Adaboost 'learns' from this data. Now if you feed it the rest of the census data, which was not labelled, it will generate those labels for you.
In the census example, each datapoint actually represents a person. The parameters of interest include height, weight, income, assets owned, marital status etc.
Here classification/labelling can mean anything: people can be classified into being above or below the poverty line. A more complex classification can be whether or not a person is eligible for a loan of (say) Rs. 10lakhs from a bank; more generally, whether a person is credit worthy.
Note that both examples are binary classifications. Meaning that the labels can only be one of two values, like a simple yes or no. This is important because Adaboost assumes, and only works on, binary classified data.
Alright, so Adaboost stands for Adaptive Boosting. So what is it boosting, and why is it called adaptive?
Let's go back to the census example and consider again the question of classifying the entire population of India as being eligible for a bank loan of Rs. 10 lakhs ie credit worthiness. I shall refer to this problem as credit worthiness henceforth.
Recall some of the parameters that we record for each person: height, weight, income, assets owned, marital status.
Let's pick one parameter, say income. How would you generate a classifier for our dataset using only this parameter?
That's easy: just pick a threshold say, an income of Rs 5 lakhs a year. This classifier will label any person with an income greater than 5 lakhs as being credit worthy and other people people as non credit worthy.
We could have generated a different classifier by simply choosing a different threshold. Like an income of 10 lakhs a year.
So here's the important point: a classifier can be uniquely generated by choosing a parameter, and a threshold for that parameter.
Back in the census example, you must have realised that some parameters are more important than others: height and weight don't really have much to do with credit worthiness. Income and assets owned are definitely very important. In the real world, banks sometimes also factor in marital status(as a sign of being responsible). Now the obvious question is, how do we quantitatively combine income, assets, marital status etc. How much weightage should income, assets, marital status get in determining the final labels? What thresholds do we choose for these parameters?
All of these questions can be answered in one go: Ask the data! We have already classified some of the data. The magic of Adaboost is that it extracts all of this information about weights and thresholds for us.
This is the "boosting" in Adaboost. It boosts the weightage of classifiers that are more relevant.
We need to define another term: weak classifiers. A weak classifier is just a classifier that is better than pure guessing. That is, it classifies more than half of the labelled data correctly. Even if its success rate is just 51%, or even 50.01%, it still counts as a weak classifier.
A strong classifier is one that has a very good success rate, like 99% or whatever.
(The astute reader would point out that every strong classifier is also a weak classifier. That's really not the point. These are just heuristic definitions used to convey obvious differences in success rates.)
Now we are in a position to understand the Adaboost algorithm:
1) Adaboost is iterative: In the first iteration, it picks the most accurate weak classifier. In the census example, it would most probably correspond to the parameter of income, with some threshold value(Note: classifiers that work with income as a parameter but have different threshold values are distinct classifiers).
2) So we have picked our first weak classifier. Now what? Well, this weak classifier must have obviously misclassified a lot of datapoints(a little less than half). The classifier that we pick in the next iteration should then be better at correctly classifying this misclassified data. How do we choose such a classifier? The way it works in Adaboost is that each data point is assigned a weight. In the first iteration, obviously all the datapoints had equal weightage. Then the weight of the misclassified datapoints is incremented, and that of correctly labelled data is decremented. So the error rate of the second classifier is calculated as a weighted sum: if it misclassifies an already misclassified datapoint, it gets a higher error rate.
3) In the second iteration, the classifier with the smallest error rate on the weighted data is chosen. Now we have chosen two weak classifiers. We again increment the weights of the datapoints that are collectively misclassified by the two weak classifiers, and decrement the weigths of the ones that they correctly classify.
4) Repeat this process of reassigning weights and picking weak classifiers on the weighted data until you have made the error rate as small as you like. Here we are referring to the error rate of the final strong classifier( what we form out of all the weak classifiers that we have picked so far).
So in a nutshell: Adaboost takes a lot of weak classifiers, assigns them appropriate weights, to make a strong classifier.
Okay. So far, we haven't encountered a single mathematical equation! Now that we understand what Adaboost does, we need to look at how it works.
Simulated Example
The best way to understand the math is to see it in action. Here we will code the adaboost algorithm and use it to classify some data.
First, we import some libraries.
End of explanation
"""
# T: number of boosting iterations; dim: features per sample; N: sample count.
T = 20
dim = 2
N = 1000
x = np.random.randn(N, 2) # dim=2
label = np.zeros(N, dtype=np.int64)
# label = x[:,0] < x[:,1] #linear separation example
# Label samples inside the unit circle as the positive class.
label = (x[:, 0]**2 + x[:, 1]**2) < 1 # nonlinear separation example
label = label * 1.0
# Map the boolean labels onto {-1, +1} for Adaboost.
pos1 = np.nonzero(label == 1)
pos2 = np.where(label == 0)
label[pos2] = -1
# Plot the data
plt.figure()
plt.plot(x[pos1, 0], x[pos1, 1], 'b*')
plt.plot(x[pos2, 0], x[pos2, 1], 'r*')
plt.axis([-3, 3, -3, 3])
plt.title("Original data")
"""
Explanation: Next, we initialise some variables that we will need:
-> N: The number of samples or data-points.
-> T: The number of iterations in our boosting algorithm.
-> dim: The number of parameters recorded for each data-point.
Unlike in the census example, we haven't actually collected any data or labelled it. So we will generate our own sample data and label it in a simple manner:
-> x: The data. It is an N x dim matrix.
-> label: N x 1 array that stores the known labels for each data-point. Here label belongs to {-1, 1}
Then we plot the original data. This is the learning dataset.
End of explanation
"""
temp = np.zeros(N, dtype=np.int64)
def weakClassifier_error(i, j, k, x, weight, label):
    """Evaluate a decision-stump weak classifier on a weighted dataset.

    The stump predicts +1 for samples with x[:, j] >= i when k == 1, and
    +1 for samples with x[:, j] < i when k == -1 (the sign flips the stump).

    Parameters
    ----------
    i : float
        Threshold applied to feature column j.
    j : int
        Index of the feature (column of x) being thresholded.
    k : int
        Stump polarity, +1 or -1.
    x : ndarray, shape (n_samples, n_features)
        The dataset.
    weight : ndarray, shape (n_samples,)
        Per-sample weights maintained by the boosting loop.
    label : ndarray, shape (n_samples,)
        True labels in {-1, +1}.

    Returns
    -------
    list
        [weighted_error, y]: weighted_error is the total weight carried by
        misclassified samples; y is an int64 array with 1 marking a
        misclassified sample and 0 a correctly classified one.
    """
    # Predicted labels in {-1, +1} for the chosen feature/threshold/polarity.
    if k == 1:
        hits = x[:, j] >= i
    else:
        hits = x[:, j] < i
    predicted = np.where(hits, 1, -1).astype(np.int64)
    # y flags the misclassified samples (1 = mistake, 0 = correct); sizing
    # follows the input arrays, so no dependence on module-level globals.
    y = (predicted != label).astype(np.int64)
    # Weighted error: sum of weights over the misclassified samples.
    temp_err = np.sum(y * weight)
    return [temp_err, y]
"""
Explanation: We discussed how a classifier can be defined by selecting a parameter and a threshold for this parameter.
The output of this classifier that we have defined would be +1 or -1 depending on whether the input's value for this parameter is greater than or smaller than the threshold, as we have discussed before.
But we had also talked about generating an error rate for weak classifiers in every iteration, on the weighted dataset.
The following lines of code define a function weak_Classifier_error() that takes a weak classifier, a labelled and weighted dataset as input.
It's output is the error rate for this weak classifier on the dataset for the given weights.
And the error rate, as mentioned before, just a weighted sum of the errors.
The weigths are: 0 if it is correctly classified and 1 if incorrectly classified.
This simplifies to:
Error rate = sum of the weights of the datapoints that were classified incorrectly
But wait you say: The arguments for this function are threshold, dimension, sign, weight and label. Where is the weak classifier? Remember how we said that a classifier is uniquely defined by its parameter and threshold values? So we just input these.
Also, the use of 'sign' is simple: It basically flips the classifier on its head. If sign is +1, then it will classify data as +1 if it is greater than threshold. If sign is -1, it will classify data as +1 if it is smaller than threshold.
End of explanation
"""
# Actual program begins
# h and alpha together completely specify the final strong classifier
h = np.zeros([T, 3], dtype=np.float64)   # per round: [threshold, dimension, sign]
alpha = np.zeros(T, dtype=np.float64)    # per round: weight of the chosen stump
threshold = np.arange(-3.0, 3.0, 0.1)    # candidate thresholds to search over
weight = np.ones(N, dtype=np.float64) / (N)  # Initialise weights (uniform, sums to 1)
# Initially set error to infinity, to allow comparing with error of classifiers
err = np.ones(T, dtype=np.float64) * np.inf
for t in range(T):
    # Exhaustive search over (threshold, dimension, sign) for the stump
    # with the smallest weighted error under the current weights.
    for i in threshold:
        for j in range(dim):
            for k in [-1, 1]:
                [tmpe, y] = weakClassifier_error(i, j, k, x, weight, label)
                if(tmpe < err[t]):  # storing the better classifier in h
                    err[t] = tmpe
                    y0 = y  # misclassification mask of the best stump so far
                    h[t][0] = i
                    h[t][1] = j
                    h[t][2] = k
    if(err[t] > 0.5):
        T = t
        # We have run out of weak classifiers! So truncate the no: of
        # iterations used
        print t, "Error!"  # NOTE: Python 2 print statement
        break
    # Stump weight: lower error -> larger alpha (standard AdaBoost formula).
    alpha[t] = 0.5 * np.log((1.0 - err[t]) / err[t])
    # y0=0 corresponded to correctly labelled datapoints. To reassign weights,
    y0[np.where(y0 == 0)] = -1
    # we need -1 and not 0 at these positions
    weight = np.float64(weight * np.exp(alpha[t] * y0))  # Reassign weights
    weight = weight / np.sum(weight)  # Normalise reassigned weights
"""
Explanation: Now we can finally take a look at the actual code for the Adaboost algorithm.
We define some variables that we need:
-> h: Tx3 array that stores the weak classifiers selected after each iteration:
h[index][0]= threshold
h[index][1]= dim (data dimension)
h[index][2]= pos (the sign of the classifier, +1/-1)
-> alpha: T x 1 array that stores the weight of each weak classifier chosen to
make up the final classifier.
The point of 'h' is obvious. However, 'alpha' needs an explanation:
We said that the final classifier will be a combination of all the weak classifiers that we have selected over the iterations.
But what does combination mean exactly?
In Adaboost, the final classifier is a weighted sum of the weak classifiers that we have selected.
Now don't get confused: we have talked about weights before. We assigned weights to each datapoint, over each iteration, so that we could select a new classifier. These weigths are stored in weights[].
And the error rate was a weighted sum of the weights.
What's beautiful about Adaboost is that it generates the final output in an analogous manner.
The weigths we are talking about here are assigned to the weak classifiers themselves.
These weights are stored in alpha[].
The final output is the weighted sum of the outputs of each weak classifier.
Alright: so now we have two sets of weights: One set for the datapoints and one set for the weak classifiers themselves.
How are these weights determined?
We said that the weigths corresponding to the datapoints were incremented or decremented based on whether or not they were correctly classified, by the best weak classifier of that iteration.
How is this done mathematically?
First we calculate alpha[t] (refer to the code). Then we use it to reassign weights.
The expression for alpha[t] isn't very revealing at first glance. But it is what it is, and it works. We will try and take a look at it in more detail later.
The observant reader will immediately point out and maybe puzzled by the fact that just a few sentences ago I mentioned that the weights for the weak classifiers are stored in alpha[].
So is this is a different alpha? No it's not!
This is an especially fascinating aspect of Adaboost. The weights for the weak classifiers and the weigths for the datapoints are linked by alpha[].
The last line in the following code block renormalises the weights. This is necessary as we are reassigning weigths after each iteration.
End of explanation
"""
temp_sum = np.zeros(N, dtype=np.float64)    # running weighted votes
temp = np.zeros(N, dtype=np.float64)
final_label = np.zeros(N, dtype=np.float64) # strong-classifier decisions
misshits = np.zeros(T)                      # error rate after t+1 rounds
for t in range(T):  # Calculate final labels
    # h[t] = [threshold, dimension, sign]; the dimension is stored as a
    # float inside h, so cast it back to int before using it as a column
    # index (float indices are rejected by recent numpy versions).
    temp = h[t][2] * np.sign(x[:, int(h[t][1])] - h[t][0])
    temp_sum = np.float64(temp_sum + alpha[t] * temp)
    final_label = np.sign(temp_sum)
    # Fraction of samples the partial strong classifier still gets wrong.
    misshits[t] = np.sum(np.float64(final_label != label)) / N
# Now plot the generated labels
pos1 = np.where(final_label == 1)
pos2 = np.where(final_label == -1)
plt.figure()
plt.plot(x[pos1, 0], x[pos1, 1], 'b*')
plt.plot(x[pos2, 0], x[pos2, 1], 'r*')
plt.axis([-3, 3, -3, 3])
plt.title("Generated data")
plt.show()
# Plot miss hits when more and more weak learners are used
plt.figure()
plt.plot(misshits)
plt.ylabel('Miss hists')
print("--- %s seconds ---" % (time.time() - start_time))
"""
Explanation: We have finished selecting all the weak classifiers and their corresponding weights. This is all that we need to generate the final output.
In our code, we are not feeding it a new dataset. Instead, we are inputting the training dataset itself. This is an excellent way to see if our algorithm actually works. Because we can compare the original labels directly with the generated lables.
Alright, so we now proceed to generate the final output. We said that the final output is the weighted sum of the outputs of all the weak classifiers. The weights, as we know, are stored in alpha[]
We then proceed to plot the generated labels also.
We also plot 'misshits', which is a measure of how the accuracy of our final classifier improves when we add the weak classifiers one by one.
End of explanation
"""
"""
All matrices used are implemented via numpy.
The following variables are used:
-> N: The number of samples or data-points.
-> T: The number of iterations in our boosting algorithm.
-> dim: The number of parameters recorded for each data-point.
(for an image we can choose RGB intensities as features and then dim=3)
-> x: The data. It is an N x dim matrix.
-> label: N x 1 array that stores the known labels for each data-point.
-> final_label: Nx1 array that stores the labels generated for each data-point
by the final strong classifier.
-> weight: Nx1 array that stores the weight for each data-point.
-> h: Tx3 array that stores the weak classifiers selected after each iteration:
h[index][0]= threshold
h[index][1]= dim (data dimension)
h[index][2]= pos (the sign of the classifier, +1/-1)
-> alpha: T x 1 array that stores the weight of each weak classifier chosen to
make up the final classifier.
-> final_alpha: Stores the weights for all the digits.
-> final_h: Stores the classifiers for all the digits.
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import csv
start_time = time.time()  # used to report total runtime at the end
# NOTE(review): opening in 'rb' and handing the file to csv.reader is
# Python 2 style; on Python 3 open with mode 'r' and newline='' instead.
with open('images_training.txt', 'rb') as csvfile:
    reader = csv.reader(csvfile, delimiter=',', quotechar='|')
    x = list(reader)
    x = np.array(x, dtype=np.float64)
    # this array is of size 13x13000, for all the 1000 13x13 images 100 for
    # each digit 0 to 9
T = 20     # number of boosting rounds per digit
dim = 169  # 13 x 13 pixels; each pixel is one feature dimension
N = 1000   # number of training images
"""
Explanation: That's all of it.
We have described all the mathematical details of Adaboost through our code. But with math comes proofs, and we haven't actually proven the convergence of the Adaboost algorithm. Neither have we derived the expression for alpha[t].
Proving convergence is pretty difficult. But you must know that the adaboost algorithm is guaranteed to converge to a strong classifier with an arbitrarily good success rate, given enough iterations(and some other caveats).
For some intuition into where that weird expression of alpha came from, refer to this link.
Application of Adaboost: Digit recognition
We have learnt how Adaboost works and have implemented the code for it as well.
With a few modifications, our code can be tailored into a full-blown application of Adaboost in a real and relevant problem; that of digit recognition. The problem is simple to state: create a program that takes an image of a digit from 0-9 as input and gives as output, the digit in the picture.
So how can we use Adaboost to solve this problem?
First, some specifics: The 'data' that we will use comprises of images that are 13 pixels wide and 13 pixels tall i.e 13x13. The training set will have 1000 images, as will the testing/validation set. Each image is stored as a 13x13 matrix. Each element of this matrix is a number from 0 to 1. Can you guess what the numbers correspond to? As is obvious, they represent the intensity of light in that particular pixel. Meaning that a value of zero corresponds to a black pixel and a value of 1 is a white pixel and any value in between will be varying shades of grey. This is the greyscale representation of an image.
The data files can be found here.
The next step is to define our weak classifiers. How do we do that?
Well, in our implementation, we will make an obvious choice: Each image comprises 13x13= 169 pixels. We have stored a greyscale for each pixel.
So the index for the weak classifier will simply be the index of the pixel that we are looking at. The weak classifier's threshold will be a greyscale value. As in our previous example, there will be also be a sign associated with our classifier.
So let's say that we are looking at the pixel with index (2,3) and have chosen a threshold greyscale value of 0.625 and the sign is +1. Then, if we give an image as input to this weak classifier, here's what will happen: Let this image have a value of 0.433 at the index (2,3). Thus, the output of our classifier will be -1 (as the sign is +1).
We compare our data with our previous code to conclude that here,
N = 1000 (as we have a 1000 datapoints i.e images)
dim = 169 (each image has 169 pixels and each one of them is a dimension of data)
We now come to a practical consideration: how are the images stored? This might seem weird at first, but they the 1000 images are stored in a 13x13000 array. Why 13x13000? Well, think of the images being placed side-by-side. So the length of all the images put together is simply the sum of their lengths and hence 13x1000 = 13000. The breadth is simply 13.
It is imperative that you understand how the images are stored before you proceed further.
We previously said that dim = 169. So how will you pick the pixel in an image corresponding to say, the dimension 155?
As you might have guessed, you will look at the row-major form of the image.
Hence, row corresponding to 155 will be int(155 / 13) and the column will be 155 % 13.
Now that we have taken a look at all the specific details, we can now see Adaboost in action with the corresponding code!
End of explanation
"""
# Returns error and calculated labels corresponding to
def weakClassifier_error(i, j, k, x, weight, label):
    """Evaluate one pixel-threshold weak classifier on the weighted image set.

    Parameters:
        i: greyscale threshold in [0, 1].
        j: pixel index in row-major order (0..168 for a 13x13 image).
        k: sign of the stump (+1: predict +1 when pixel >= i; -1: the
           flipped test).
        x: 13 x (13 * n_images) matrix, the images laid side by side.
        weight: (n_images,) per-image weights.
        label: (n_images,) true labels in {-1, +1}.

    Returns:
        [temp_err, y] where temp_err is the weighted error rate and y is
        a 0/1 array with 1 marking images this stump misclassifies.
    """
    # Integer division: row/column of pixel j inside the 13x13 image.
    # ('/' would produce a float under Python 3 and break the indexing.)
    j_row = j // 13
    j_col = j % 13
    # x[j_row, j_col:13000:13] picks pixel (j_row, j_col) of every image.
    if(k == 1):
        temp = (x[j_row, j_col:13000:13] >= i)
    else:
        temp = (x[j_row, j_col:13000:13] < i)
    # Map the boolean predictions onto {-1, +1}.
    temp = np.int64(temp)
    temp[np.where(temp == 0)] = -1
    # 1 where the stump disagrees with the true label, 0 where it agrees.
    y = np.int64(temp != label)
    # Calculate error of this weak classifier on the weighted dataset
    temp_err = np.sum(y * weight)
    return [temp_err, y]
"""
Explanation: As discussed, this time around the dim = 169.
We have imported the csv module to open the image.
End of explanation
"""
# Actual program begins
threshold = np.arange(0, 1.0, 0.05)  # candidate greyscale thresholds in [0, 1)
# h and alpha together completely specify the final strong classifier
final_alpha = np.zeros((10, T), dtype=np.float64)  # stump weights, one row per digit
final_h = np.zeros((10, T, 3), dtype=np.float64)   # stumps [threshold, pixel, sign] per digit
for p in range(10):
    # Train one binary one-vs-rest AdaBoost classifier for digit p.
    h = np.zeros([T, 3], dtype=np.float64)
    alpha = np.zeros(T, dtype=np.float64)
    temp = np.zeros(N, dtype=np.int64)
    label = np.zeros(N, dtype=np.int64)
    label = label * 1.0  # int array -> float array
    # Images are stored 100 per digit: digit p occupies rows p*100..p*100+99.
    label[p * 100: p * 100 + 100] = 1
    label[np.where(label == 0)] = -1
    weight = np.ones(N, dtype=np.float64) / (N)  # Initialise weights
    # Initially set error to infinity, to allow comparing with error of
    # classifiers
    err = np.ones(T, dtype=np.float64) * np.inf
    for t in range(T):
        for i in threshold:
            for j in range(dim):
                for k in [-1, 1]:
                    [tmpe, y] = weakClassifier_error(i, j, k, x, weight, label)
                    if(tmpe < err[t]):  # storing the better classifier in h
                        err[t] = tmpe
                        y0 = y
                        h[t][0] = i
                        h[t][1] = j
                        h[t][2] = k
        if(err[t] > 0.5):
            T = t
            # We have run out of weak classifiers! So truncate the no: of
            # iterations used
            # NOTE(review): truncating the global T here also shortens
            # training for every later digit p — confirm this is intended.
            print t, "Error!"
            break
        alpha[t] = 0.5 * np.log((1.0 - err[t]) / err[t])
        # y0=0 corresponded to correctly labelled datapoints. To reassign
        # weights,
        y0[np.where(y0 == 0)] = -1
        # we need -1 and not 0 at these positions
        weight = np.float64(weight * np.exp(alpha[t] * y0))  # Reassign weights
        weight = weight / np.sum(weight)  # Normalise reassigned weights
    final_alpha[p] = alpha
    final_h[p] = h
"""
Explanation: Notice how the definition of weakclassifier_error hasn't changed much. Only the "dim" has been complicated a bit.
To remind you, we previously said that dim = 169. So how will you pick the pixel in an image corresponding to say, the dimension 155? We will look at the row-major form of the image. Hence, row corresponding to 155 will be int(155 / 13) and the column will be 155 % 13.
Here, j_row and j_col do just that.
Also, x[j_row, j_col:13000:13] returns a list that contains the pixel value at the position (j_row, j_col). This list contains a 1000 elements, corresponding to our 1000 training images.
End of explanation
"""
# Reload the training images (13 x 13000: the 1000 images side by side).
with open('images_training.txt', 'rb') as csvfile:
    reader = csv.reader(csvfile, delimiter=',', quotechar='|')
    x = list(reader)
    x = np.array(x, dtype=np.float64)
temp_sum = np.zeros((10, N), dtype=np.float64)    # running votes, one row per digit
temp = np.zeros(N, dtype=np.float64)
final_label = np.zeros((10, N), dtype=np.float64) # per-digit {-1,+1} decisions
misshits = np.zeros(T)                            # error rate after t+1 rounds
# Ground-truth digit labels: images are stored in blocks of 100 per digit.
for p in range(10):
    label[100 * p: 100 * p + 100] = p
label = np.int64(label)
all_label = np.full(N, -1, dtype=np.int64)        # -1 = "no digit claimed this image"
for t in range(T):  # Calculate final labels
    for p in range(10):
        # final_h stores [threshold, pixel-index, sign] as floats; cast
        # the pixel index back to int before using it as an array index
        # ('/' on a float plus float indexing break under Python 3 /
        # recent numpy).
        row = int(final_h[p][t][1]) // 13
        col = int(final_h[p][t][1]) % 13
        temp = final_h[p][t][2] * \
            np.sign(x[row, col: 13000: 13] - final_h[p][t][0])
        temp_sum[p] = np.float64(temp_sum[p] + final_alpha[p][t] * temp)
        final_label[p] = np.sign(temp_sum[p])
    # An image is labelled p when digit p's strong classifier votes +1
    # (later digits overwrite earlier ones if several claim the image).
    for p in range(10):
        all_label[np.where(final_label[p] == 1)] = p
    misshits[t] = np.sum(np.float64(all_label != label)) / N
plt.figure()
plt.plot(misshits)
plt.ylabel('Miss hists')
plt.show()
print("--- %s seconds ---" % (time.time() - start_time))
"""
Explanation: Compare this codeblock with the code for our toy example. You will notice that the only difference is that we have an additional for p in range(10).
So basically, because now we have to classify digits, we will build a separate strong classifier for each digit from 0 to 9. That is why we are running our Adaboost algorithm 10 times.
In each of theses iterations, we are storing the strong classifier in the arrays final_alpha and final_h.
End of explanation
"""
|
encima/Comp_Thinking_In_Python | Session_2/2_Coding.ipynb | mit | # Does this make sense without comments?
with open('myfile.csv', newline='') as opened_csv:
    spamreader = csv.reader(opened_csv, delimiter=' ', quotechar='|')
    for row in spamreader:
        print (', '.join(row))
# How about this?
# open the csv file for reading; newline='' lets the csv module manage
# line endings itself (csv.reader needs a text-mode file on Python 3,
# so the old 'rb'/'rbU' modes would fail)
with open('myfile.csv', newline='') as opened_csv:
    # read opened csv file with spaces as delimiters
    # (the original passed the undefined name `csvfile` here)
    spamreader = csv.reader(opened_csv, delimiter=' ', quotechar='|')
    # loop through and print each line
    for row in spamreader:
        print (', '.join(row))
"""
Explanation: Coding in Python
Dr. Chris Gwilliams
gwilliamsc@cardiff.ac.uk
Writing in Python: PEP
Python Enhancement Proposals
Unsure how your code should be written? PEP is a style guide for Python and provides details on what is expected.
Use 4 spaces instead of tabs
Lines should be 79 characters long
Variables should follow snake_case
All lower case words, separated by underscores (_)
Classes should be Capitalised Words (MyClassExample)
PEP
Comments
Sometimes, you need to describe your code and the logic may be a bit complicated, or it took you a while to figure it out and you want to make a note.
You can't just write some text in the file or you will get errors, this is where comments come in! Comments are descriptions that the Python interpreter ignores.
Just type a # amd what ever you want to write and voíla!
It is ALWAYS a good idea to comment your code!
End of explanation
"""
"Gavin" #String Literal
4 #Integer Literal
"""
Explanation: Types
Python has a type system (variables have types), even if you do not specify it when you declare them.
String
Float
Boolean
Integer
None
Exercise
Give me an example of each of these types.
What is the None type?
Use the Internet to find me examples of 1 other type in Python
We will come back to type as the course progresses.
Literals
Literally a value.
All of the examples you just gave are literals.
End of explanation
"""
# declared and instantiated
name = "Gavin"
# declared, but not instantiated
# (strictly, Python has no uninstantiated variables: binding to None is
# the conventional "no value yet" placeholder)
new_name = None
"""
Explanation: 3.14 is a float literal
Variables
Literals are all well and good for printing but what about when we need to change and store these literals?
Variables are ways of giving literal values a name in order to refer to them later.
Below, we are declaring and instantiating a variable
End of explanation
"""
x # does not exist so cannot print it (raises NameError on purpose)
x = 1
print(x)
"""
Explanation: Variables in Python
Technically, Python does not have empty variables that are not instantiated. A variable exists as soon as it is assigned a value. Like this:
End of explanation
"""
a = 1
b = 1
c = 1
# CPython interns small integers, so all three names tag the same object
print(id(a))
print(id(b))
"""
Explanation: Variables in Python
In many languages, when a variable is instantiated, it is reserved into a block of memory and the variable points to that memory address, often unique for that variable.
In Python, it is more like that memory address is tagged with the variable name. So, if we create three variables that have the same literal value, then they all point to the same memory address. Like so:
End of explanation
"""
a,b,c = 1,1,1  # multi-variable assignment: three names, three values
name, age, yob = "chris", 26, 1989
print(name, age, yob)
print(id(name), id(age), id(yob))
a,b,c = "Name",12,"6ft"
print(a,b,c) #NOTE: always balance the left and the right. 3 variables must have 3 values!
"""
Explanation: id and Multi Variable Assignment
The id function is built into Python and returns the memory address of variables provided to it.
This is easier to see when we use multi-variable assignment in Python:
End of explanation
"""
print("Hello " + "World") #ok
print("hello" + 5) #strongly typed means this cannot happen!
name = "Chris"
name = "Pi"
"pi" + 6 #Strongly typed means no adding different types together!
name = 3.14 #dynamically typed means yes to changing the type of a variable!
"""
Explanation: Don't Call It That:
These are keywords reserved in Python, so do not name any of your variables after these! You will learn about what many of these do throughout this course.
| False | class | finally | is | return |
|----------|--------|----------|-------|--------|
| continue | for | lambda | try | True |
| def | from | nonlocal | while | and |
| del | global | not | with | as |
| elif | if | or | yield | assert |
| else | import | pass | break | except |
| in | raise | None | | |
Useful links on Python variables
http://python.net/~goodger/projects/pycon/2007/idiomatic/handout.html#other-languages-have-variables
http://anh.cs.luc.edu/python/hands-on/3.1/handsonHtml/variables.html
http://foobarnbaz.com/2012/07/08/understanding-python-variables/
http://www.diveintopython.net/native_data_types/declaring_variables.html
Types II
Python is Strongly Typed - The Python interpreter keeps track of all the variables and their associated types.
AND
Python is Dynamically Typed - Variables can be reassigned from their types. A variable is simply a value bound to a name, the variable does not hold a type, only the value does.
End of explanation
"""
type('am I your type?')  # returns the class of the literal: str
"""
Explanation: Recap Questions
What is a REPL?
What does strongly typed mean?
What does dynamically typed mean?
What is a literal?
Variable Recap
Give me 3 types in Python
What would you use each type for?
How do you declare multiple variables at the same time?
What is wrong with the code below?
name, age, height = 12, 'Terry'
Read, Eval, Print, Loop
Strongly typed is that type errors are caught as errors and Python keeps track of types of variables
Dynamically typed means that variables do not need a type declared at their declaration and the type can be changed at runtime
A literal is literally a value
Boolean - Store flags, String - hold text, Float - decimal numbers
Separate them with a comma
The sides do not match up, there is a missing literal
Finding Types
Got some code and don't know what the types are?
Python has some functions to help with this.
End of explanation
"""
'single_quotes'  # single-quoted, still just a str (Python has no char type)
"""
Explanation: Exercise
Try this with different types, how many can you find?
What happens when you do type([])?
type is an example of a built-in function, we will come back to these in a few sessions.
Strings
Strings in Python are unlike many languages you have seen before.
They can be:
End of explanation
"""
'isn't # SyntaxError: the apostrophe closes the string early
'isn\'t' # works a charm: the backslash escapes the apostrophe
"""
Explanation: What do single quotes usually mean in most languages?
Strings wrapped in 'single quotes' are typically chars (single characters). Python does not have this type.
char yes = 'Y' //char (Python does not have this)
string no = "no" //string
What are chars used for? What does Python have instead?
Chars are typically used as single character flags, like a 'Y' or an 'N' as an answer to a question, or to hold an initial.
Anything text based can be stored in a string but flags can be represented as a 0 or 1 or even using a Boolean value, which is easiest to check against.
What happens if you use a single quote for strings and you write the word don't in the string? Try it out now!
How do we get around that?
Escaping Strings
When you want to include special characters (like ') then it is always good to escape them!
Ever seen a new line written as \n? That is an example of escaping.
Escape Character
An escape character is a character which invokes an alternative interpretation on subsequent characters in a character sequence.
This is pretty much always \
End of explanation
"""
print("Got something to say?")
print("Use the print statement")
print("to print a string literal")
print("float literal")
print(3.14)
more_string = "or variable"
print(more_string)
"""
Explanation: Strings II
If you do not want to escape every special character, maybe there is a better way?
python
"I am a string and I don't care what is written inside me"
"""I am a string with triple double quotes and I can
run across multiple lines"""
Double quotes is generally better as you do not have to escape these special characters.
Exercise
Declare a variable to store a boolean literal
Declare and instantiate a new variable that stores your age
The first one was a trick! Declaring a new variable means giving it no value!
The second one would be: age = 20
Reassigning Variables
It would not be fun to have to create a new variable for every thing you want to store, right?
As well as being a huge inconvenience, it is actually really inefficient.
```python
age = 40
1 year passes
age = 41
```
Easy, right? By using the same variable name, it is now associated with your new value and the old value will be cleared up.
Printing
We have seen this print keyword thrown around a lot, right? This is the best way to show some information.
Especially useful if your script takes a long time to run!
E.g. print("stuff")
End of explanation
"""
float_type = 3.0
int_type = 5
print(int_type + float_type)  # int + float promotes to float: 8.0
string_type = "hello"
print(string_type + float_type) # what is the error? (TypeError: str + float)
bool_type = True
print(string_type + bool_type)  # also a TypeError: str + bool
print(int_type + bool_type) # does this work? Why? (yes: bool is a subclass of int, True == 1)
"""
Explanation: Exercise
Create a variable of type string and then reassign it to a float literal.
Now try adding a float literal to your variable
Operators
What is an operator? What do you remember from maths?
+ (add)
- (subtract)
/ (divide)
* (multiply)
There are more, but we will get to them!
Exercise
Add 3 and 4
Add 3.14 + 764 (what is the difference to the above answer?)
Subtract 100 from 10
Multiply 10 by 10
Add 13 to 'adios'
Multiply 'hello' by 5
Divide 10 by 3
Divide 10 by 3 but use 2 / (what happens?)
In Python 3, // is floor division and / is floating point division
Adding/Dividing/Subtracting Across Types
End of explanation
"""
float_type = 3.0
int_type = 5
print(int_type * float_type)   # 15.0
string_type = "hello"
print(string_type * int_type)  # string repetition: 'hello' five times
bool_type = True
print(string_type * bool_type) #why does this work? (bool is an int: True == 1)
print(int_type * bool_type)    # 5
"""
Explanation: Multiplying Across Types
While Python is not happy to add/divide/subtract numbers from strings, it is more than happy to multiply
End of explanation
"""
year = year + 1  # read, add one, rebind (year must already exist)
year += 1        # augmented assignment: same effect, shorter
"""
Explanation: Operating and Assigning
You may have a variable and want to change the value, this is reassigning, right?
python
year = 1998
year = 1999
There has to be an easier way. THERE IS. What is it?
End of explanation
"""
dir("")
"""
Explanation: Exercise
Try this with all the operators you know.
Built in Functions
A function is a block of code that:
- receives an input(s) (also known as arguments)
- performs an operation (or operations)
- Optionally, returns an output
Python has some built-in functions and we have used one already. What was it?
Functions - Print
print("Hello")
Format:
- function_name
- Open brackets
- inputs (separated, by, commas)
- Close brackets
Sometimes, inputs are optional. Not always. We will get to this.
Other Built in Functions
str()
len()
type()
int()
Exercise
Find out what the above functions do and use them in a script
What happens when you don't give each an argument?
Look up and write up definitions for the id and isinstance functions
str - converts an object to a string type
len - prints the length of an object
type - tells you the type of a literal or a variable
int - converts a type to an integer
id - a unique id that relates to where the item is stored in memory
isinstance - checks if an object if of the supplied type
Functions to Help You
dir
End of explanation
"""
help(int)  # documentation for the int type
help("")   # documentation for str (help on an instance shows its class)
help(1)    # documentation for int again
"""
Explanation: help
End of explanation
"""
|
ctk3b/imolecule | examples/ipython.ipynb | mit | import imolecule
imolecule.draw("CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C")
"""
Explanation: imolecule in the IPython notebook
I created imolecule to fix a deficiency in my workflow. While my chemical simulations were entirely in notebooks, I had to use external programs like mercury to visually debug chemical structures. Mercury is a good program, but dropping out of the notebook felt clumsy and made my work less reproducible.
This is my solution. It's a short javascript file that makes heavy use of three.js to render 3D shapes. This code is connected to the notebook with a simple Python API, and can be used like so:
End of explanation
"""
imolecule.draw("data/NaX.cif")
"""
Explanation: It can also load files directly, automatically using the file extension to determine format.
End of explanation
"""
import imolecule
# Space-filling model with an orthographic camera and a larger window.
imolecule.draw("data/NaX.cif", drawing_type="space filling", camera_type="orthographic", size=(800, 600))
"""
Explanation: When viewing crystal pores, you usually want to use an orthographic camera and a space-filling model. You may also want to make the initial window size bigger for static viewing.
End of explanation
"""
from IPython.display import display, HTML
# Aromatic carbon rings of increasing size as SMILES strings (generator).
carbons = ("c1{}c1".format("c" * i) for i in range(3, 7))
shaders = ("basic", "lambert", "phong", "toon")
# Render each ring with a different shader, returning HTML instead of displaying.
renders = (imolecule.draw(c, size=(200, 150), shader=s, display_html=False) for c, s in zip(carbons, shaders))
# Wrap each render in a Bootstrap grid column and show them in one row.
columns = ('<div class="col-xs-6 col-sm-3">{}</div>'.format(r) for r in renders)
display(HTML('<div class="row">{}</div>'.format("".join(columns))))
"""
Explanation: For advanced usage, create highly customized outputs by combining with your own html.
End of explanation
"""
help(imolecule.draw)  # full parameter documentation
"""
Explanation: Read the docs for more.
End of explanation
"""
import pybel
# With this flag set, open babel renders molecules in 3D in the notebook.
pybel.ipython_3d = True
# Parse the molecule from its SMILES representation.
sarin = pybel.readstring("smi", "CCOP(=O)(C)SCCN(C(C)C)C(C)C")
sarin
"""
Explanation: If you use open babel, you may already have imolecule! It has been in the development version of open babel since September 2013. Set the pybel.ipython_3d variable to True, and open babel will render your molecules.
End of explanation
"""
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.Draw import IPythonConsole
IPythonConsole.ipython_3d = True  # enable 3D rendering in the notebook
taxol = ("CC(=O)OC1C2=C(C)C(CC(O)(C(OC(=O)c3ccccc3)C4C5(COC5CC(O)C4(C)C1=O)"
         "OC(=O)C)C2(C)C)OC(=O)C(O)C(NC(=O)c6ccccc6)c7ccccc7")
mol = Chem.AddHs(Chem.MolFromSmiles(taxol))  # parse SMILES and add explicit hydrogens
AllChem.EmbedMolecule(mol)         # generate initial 3D coordinates
AllChem.MMFFOptimizeMolecule(mol)  # relax the geometry with the MMFF force field
mol
"""
Explanation: If you use RDKit, you may also have imolecule! The code has been in RDKit since May 2014.
End of explanation
"""
|
csaladenes/csaladenes.github.io | present/gtk/test.ipynb | mit | import pandas as pd
import html5lib
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: GTK adatvizualizációs kurzus
Bővítőcsomagok importálása:
End of explanation
"""
csv_path='http://www.csaladen.es/present/sapientia1/exportPivot_POP105A.csv' # replace with your own CSV file location
df=pd.read_csv(csv_path)
df.head()
"""
Explanation: Romániai lakosság letöltése INSSE-ról:
Előzetesen letöltött fájlútvonalt használunk.
End of explanation
"""
wiki_path="http://hu.wikipedia.org/wiki/Csíkszereda"
"""
Explanation: Wikipédia táblázatok letöltése
End of explanation
"""
# read_html returns a list of every table found on the page
df2=pd.read_html(wiki_path)
df2[4]  # the 5th table (index 4) holds the data we need
"""
Explanation: Ha html5llib not found hibaüzenetet kapunk, akkor egy konzol (Command Prompt, Parancssor) megnyitásával és a conda install html5lib vagy pip install html5lib parancsokal telepítjük. Ezután újra kell indítani a Jupyter-t.
End of explanation
"""
gf=df2[4]  # keep only the table of interest as a DataFrame
gf
"""
Explanation: A táblázatlistából nincsen szükség csak a 5. (tehát 4-es indexű, 0-tól kezdődik) táblázatra. Ezt mentsük el az gf változóba, aminek a típusa egy pandas dataframe lesz.
End of explanation
"""
ef=gf[1:4]                      # keep rows 1..3 only
ef.columns=ef.loc[ef.index[0]]  # promote the first kept row to the header
ef=ef.drop(1)                   # drop that header row from the data
ef=ef.set_index(ef.columns[0])  # the first column becomes the index
ef=ef.drop(u'Év',axis=1)        # drop the year column (Hungarian 'Év' = 'Year')
ef
"""
Explanation: Csak az 1-től 4-ig terjedő sorok van szükség, a többit eldobjuk.
Ezután a 0. sort beállítjuk indexnek. Miután ez megtörtént, ezt is eldobjuk a sorok közül.
End of explanation
"""
rf=ef.T  # transpose: months become rows
rf.head(2)
"""
Explanation: Transzponáljuk a táblázatot:
End of explanation
"""
#uj=[[] for i in range(len(rf.columns))]
# Build a list of dicts (one record per row of rf) in the shape d3plus expects.
d3=[]
ujnevek=['ujmax','ujmin']  # output keys for the two value columns
for k in range(len(rf.index)):
    i=rf.index[k]
    seged={}  # record for the current row
    for j in range(len(rf.loc[i])):
        # Cell values are strings with a trailing unit character; values
        # containing ',' are parsed as negatives with two trailing chars
        # stripped -- assumes the Wikipedia table's number format; TODO confirm.
        uc=unicode(rf.loc[i][j])
        if ',' in uc:
            ertek=-int(uc[1:-2])
        else:
            ertek=int(uc[0:-1])
        #uj[j].append(ertek)
        seged[ujnevek[j]]=ertek
        seged["honap"]=rf.index[k]
        seged["honap2"]=k+1  # 1-based month number
    d3.append(seged)
"""
Explanation: D3plus-ba betölthető json formátumban elmentjük a táblázat tartalmát.
Ezt úgy érhetjük el, hogy végigmegyünk a táblázat értékein minden sorban, majd minden oszlopban. Vigyázzunk a magyar karakterekre, ezért fontos az unicode rendszerbe való konvertálás.
A táblázatban tárolt értékek string-ek, ezeket egész számokká konvertáljuk, figyelembe véve a pozitív/negatív értékek formátumát.
End of explanation
"""
d3
"""
Explanation: Az eredmény:
End of explanation
"""
import json
file('uj.json','w').write(json.dumps(d3))
"""
Explanation: Elmentjük a fájlt:
End of explanation
"""
|
4dsolutions/Python5 | Dates3.ipynb | mit | import pandas as pd
from pandas import DataFrame, Series
import numpy as np
rng = pd.date_range('3/9/2012 9:30', periods=6, freq='D')
rng
type(rng)
rng2 = pd.date_range('3/9/2012 9:30', periods=6, freq='M')
rng2
ts = Series(np.random.randn(len(rng)), index=rng)
type(ts)
ts
ts.index.tz
rng.tz
ts_utc = ts.tz_localize('UTC')
ts_utc.index.tz
ts_utc
ts_pacific = ts_utc.tz_convert('US/Pacific')
ts_pacific
from IPython.display import YouTubeVideo
YouTubeVideo("k4EUTMPuvHo")
ts_eastern = ts_pacific.tz_convert('US/Eastern')
ts_eastern
ts_berlin = ts_pacific.tz_convert('Europe/Berlin')
ts_berlin
"""
Explanation: Pandas and Datetimes
Pandas helps ease the pain of timezones, even as it provides many useful tools for generating DateTimeIndex based time Series.
End of explanation
"""
stamp = pd.Timestamp('2011-03-12 04:00')
stamp2 = pd.Timestamp('Wed May 23 11:35:54 2018') # will this work too?
type(stamp2)
stamp2_pac = stamp2.tz_localize('US/Pacific')
stamp2_pac
stamp2_pac.tz_convert('Europe/Moscow')
stamp2_pac.value # nanoseconds since the UNIX Epoch, Jan 1 1970
stamp2_pac.tz_convert('Europe/Moscow').value
stamp3 = pd.Timestamp('Wed May 23 11:35:54 1950')
stamp3.value # negative number because before the UNIX Epoch
ts
ts_sum = ts_eastern + ts_utc.tz_convert("Europe/Moscow")
ts_sum.index
"""
Explanation: Timestamp type (for individual datetimes)
End of explanation
"""
pd.Timestamp.now(tz='US/Pacific') # getting you started
"""
Explanation: LAB CHALLENGE
What time is it right now in:
Moscow
Berlin
Tokyo
End of explanation
"""
|
CalPolyPat/phys202-2015-work | assignments/assignment05/InteractEx03.ipynb | mit | %matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
"""
Explanation: Interact Exercise 3
Imports
End of explanation
"""
def soliton(x, t, c, a):
    """Value of the KdV soliton phi(x, t) = (c/2) * sech^2(sqrt(c)/2 * (x - c*t - a)).

    Parameters
    ----------
    x : float or ndarray -- position(s)
    t : float or ndarray -- time(s)
    c : float -- wave velocity
    a : float -- initial location of the soliton

    Works elementwise when x and/or t are NumPy arrays.
    """
    # Bug fix: sech = 1/cosh, not 1/cos.  The original used np.cos (the
    # secant), which only agrees with the sech^2 formula at the peak --
    # exactly the point the cell's assert checks, so the bug was masked.
    u = 0.5 * np.sqrt(c) * (x - c * t - a)
    return 0.5 * c / np.cosh(u) ** 2
assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5]))
"""
Explanation: Using interact for animation with data
A soliton is a constant velocity wave that maintains its shape as it propagates. They arise from non-linear wave equations, such has the Korteweg–de Vries equation, which has the following analytical solution:
$$
\phi(x,t) = \frac{1}{2} c \mathrm{sech}^2 \left[ \frac{\sqrt{c}}{2} \left(x - ct - a \right) \right]
$$
The constant c is the velocity and the constant a is the initial location of the soliton.
Define a soliton(x, t, c, a) function that computes the value of the soliton wave for the given arguments. Your function should work when the position x or the time t is a NumPy array, in which case it should return a NumPy array itself.
End of explanation
"""
tmin = 0.0
tmax = 10.0
tpoints = 1000
t = np.linspace(tmin, tmax, tpoints)
xmin = 0.0
xmax = 10.0
xpoints = 200
x = np.linspace(xmin, xmax, xpoints)
c = 1.0
a = 0.0
"""
Explanation: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays:
End of explanation
"""
# Precompute phi[i, j] = phi(x[i], t[j]) over the full space-time grid,
# one spatial row at a time; soliton() broadcasts over the time array.
phi=np.empty((xpoints, tpoints))
for i in range(xpoints):
    phi[i,:] = soliton(x[i], t[:], c, a)
assert phi.shape==(xpoints, tpoints)
assert phi.ndim==2
assert phi.dtype==np.dtype(float)
assert phi[0,0]==soliton(x[0],t[0],c,a)
"""
Explanation: Compute a 2d NumPy array called phi:
It should have a dtype of float.
It should have a shape of (xpoints, tpoints).
phi[i,j] should contain the value $\phi(x[i],t[j])$.
End of explanation
"""
def plot_soliton_data(i=0):
    """Plot the soliton profile phi(x, t[i]) at integer time-step index *i*."""
    # Bug fix: index the time axis with the integer step `i`, not the float
    # time value `t[i]` -- floats are not valid array indices, and even as an
    # index t[i] would select the wrong column.
    plt.plot(x, phi[:, i])
    plt.xlabel('$x$')
    plt.ylabel('$\phi(x, t)$')
    plt.title('Soliton wave at time step %d' % i)
plot_soliton_data(0)
assert True # leave this for grading the plot_soliton_data function
"""
Explanation: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful.
End of explanation
"""
interact(plot_soliton_data, i=(0,999, 1))
assert True # leave this for grading the interact with plot_soliton_data cell
"""
Explanation: Use interact to animate the plot_soliton_data function versus time.
End of explanation
"""
|
GoogleCloudPlatform/vertex-pipelines-end-to-end-samples | pipelines/schema_creation.ipynb | apache-2.0 | !pip install tensorflow-data-validation==1.3.0
import tensorflow_data_validation as tfdv
from google.cloud import bigquery
"""
Explanation: Requirements
End of explanation
"""
# GCP project id
PROJECT_ID = '<project_id>'
# BQ dataset id
DATASET_ID = '<dataset_id>'
# dataset location
DATA_LOCATION = '<location>' # e.g. "EU"
# source table name to extract sample from
SOURCE_TABLE = '<training_input_table_id>'
# sample table id suffix
SAMPLE_TABLE = SOURCE_TABLE + '<table_suffix>' # e.g. "_sample"
# full source table name
SOURCE_TABLE_WITH_PROJECT_ID = PROJECT_ID + '.' + DATASET_ID + '.' + SOURCE_TABLE
# full sample table names
SAMPLE_TABLE_WITH_PROJECT_ID = PROJECT_ID + '.' + DATASET_ID + '.' + SAMPLE_TABLE
SAMPLE_TABLE_WO_PROJECT_ID = DATASET_ID + '.' + SAMPLE_TABLE
# GCS path to schema folder
GCS_BASE_DIR = 'gs://<bucket>/<schema_folder>/'
# GCS table name
GCS_TABLE = '<sample_table.csv>'
# full GCS sample table path
GCS_SAMPLE_TABLE = GCS_BASE_DIR + GCS_TABLE
# sample table size
# should be defined such that the sample table provides adequate representation of distributions of numeric features
SAMPLE_SIZE = 1000
# name of label column for serving
LABEL_COLUMN_NAME = '<label_column_name>' # e.g. "total_fare"
# features to check for skew with corresponding threshold values
SKEW_THRESHOLD = {
"<feature_1>": 0.01,
"<feature_2>": 0.01
}
# the threshold values should be defined in
# L infinity norm for categorical features
# jensen shannon divergence for numeric features
"""
Explanation: <h1> Define constants
Modify project-specific constants bracketed below
End of explanation
"""
!bq --location=$DATA_LOCATION query \
--use_legacy_sql=false \
--destination_table=$SAMPLE_TABLE_WITH_PROJECT_ID \
--replace=true \
'CREATE OR REPLACE TABLE {SAMPLE_TABLE_WITH_PROJECT_ID} AS (SELECT * FROM `{SOURCE_TABLE_WITH_PROJECT_ID}` LIMIT {SAMPLE_SIZE})'
"""
Explanation: <h1> Create a sample of the preprocessed dataset </h1>
Extract a subset from the source table and write to a sample table in BigQuery
End of explanation
"""
!bq --location=$DATA_LOCATION extract \
$SAMPLE_TABLE_WO_PROJECT_ID \
$GCS_SAMPLE_TABLE
"""
Explanation: <h1>Extract the sample to GCS</h1>
Save the sample table to a cloud bucket as a csv file
End of explanation
"""
stats = tfdv.generate_statistics_from_csv(
data_location=GCS_SAMPLE_TABLE,
output_path=GCS_BASE_DIR+'sample_stats.pb'
)
"""
Explanation: <h1>Generate statistics from sample</h1>
Generate statistics from the csv file and write to sample_stats.pb in the same bucket folder
End of explanation
"""
schema = tfdv.infer_schema(stats)
"""
Explanation: Infer schema from sample statistics
End of explanation
"""
# Re-populate each categorical feature's domain from the full source table:
# the sample only covers a fraction of the possible string values.
for f in schema.string_domain:
    # take the schema of feature f to be modified
    # clear existing domain of f (protobuf repeated fields have no clear()
    # here, so pop until empty)
    while len(f.value)>0:
        f.value.pop()
    # query the full set of distinct, non-null domain values from the
    # original (preprocessed) data
    QUERY = (f'SELECT DISTINCT {f.name} FROM `{SOURCE_TABLE_WITH_PROJECT_ID}` WHERE {f.name} IS NOT NULL')
    query_job = bigquery.Client().query(QUERY)
    rows = query_job.result()
    # append full list of values to the cleared domain of f
    for row in rows:
        new_value = list(row.values())  # each row holds a single distinct value
        f.value.extend(new_value)
"""
Explanation: The sample table should provide an adequate representation of the numeric features in terms of data schema. However, it would only contain a small portion of all possible domain values of categorical features due to its size. Therefore, the full domains of categorical features need to be generated from the original (preprocessed) dataset. The domain values in the schema then need to be cleared and re-populated with the new values.
Modify schema to inclue full string domain of categorical features
End of explanation
"""
tfdv.write_schema_text(
schema=schema,
output_path=GCS_BASE_DIR+'tfdv_schema_training.pbtxt'
)
"""
Explanation: Write schema to text
Write to tfdv_schema_training.pbtxt in the same bucker folder
End of explanation
"""
# get a list of names of categorical features
cat_feat = [f.name for f in schema.string_domain]
# add skew comparator for features in SKEW_THRESHOLD
for feature in SKEW_THRESHOLD:
f = tfdv.get_feature(schema, feature)
if feature in cat_feat:
# use infinity_norm for categorical features
f.skew_comparator.infinity_norm.threshold = SKEW_THRESHOLD[feature]
else:
# use jensen_shannon_divergence for numeric features
f.skew_comparator.jensen_shannon_divergence.threshold = SKEW_THRESHOLD[feature]
"""
Explanation: <h1>Generate serving schema based on the training schema</h1>
Specify skew comparators for serving data validation
End of explanation
"""
# define TRAINING and SERVING environments
schema.default_environment.append('TRAINING')
schema.default_environment.append('SERVING')
# specify that the label column is not in SERVING environment
tfdv.get_feature(schema, LABEL_COLUMN_NAME).not_in_environment.append('SERVING')
"""
Explanation: Specify pipeline environments
End of explanation
"""
tfdv.write_schema_text(
schema=schema,
output_path=GCS_BASE_DIR+'tfdv_schema_serving.pbtxt'
)
"""
Explanation: Write the modified schema to tfdv_schema_serving.pbtxt in the same bucker folder
End of explanation
"""
|
atulsingh0/MachineLearning | scikit-learn/Matplotlib_Tutorial_01.ipynb | gpl-3.0 | # import
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
"""
Explanation: Matplotlib tutorial 01
End of explanation
"""
X = [1, 2.4, 5, 7, 3.2]
plt.plot(X)
plt.show()
"""
Explanation: Simple Plot
By default, matplotlib is plotting line which joins all the points
End of explanation
"""
X = [1, 2.4, 5, 7, 3.2]
plt.plot(X, linestyle='--') # dashed line
plt.show()
X = [1, 2.4, 5, 7, 3.2]
plt.plot(X, linestyle='-.') # dashed dot line
plt.show()
"""
Explanation: We can add different parameters to change the look of charts such as linestyle, linewidth or marker.
Let's check them one by one -
Adding linestyle
End of explanation
"""
X = [1, 2.4, 5, 7, 3.2]
plt.plot(X, linestyle='--', linewidth=4) # dashed dot line
plt.show()
"""
Explanation: Adding linewidth
End of explanation
"""
X = [1, 2.4, 5, 7, 3.2]
plt.plot(X, marker='p') # added mark point
plt.show()
"""
Explanation: Adding marker
End of explanation
"""
days = list(range(0, 22, 3))
celsius_values = [25.6, 24.1, 26.7, 28.3, 27.5, 30.5, 32.8, 33.1]
plt.plot(days, celsius_values)
plt.show()
days = list(range(0, 22, 3))
celsius_values = [25.6, 24.1, 26.7, 28.3, 27.5, 30.5, 32.8, 33.1]
plt.plot(days, celsius_values, linestyle='--', color='g' )
plt.show()
days = list(range(0, 22, 3))
celsius_values = [25.6, 24.1, 26.7, 28.3, 27.5, 30.5, 32.8, 33.1]
plt.plot(days, celsius_values, 'og' ) # first char o is repesting the circle, #g is color green
plt.show()
days = list(range(0, 22, 3))
celsius_values = [25.6, 24.1, 26.7, 28.3, 27.5, 30.5, 32.8, 33.1]
plt.plot(days, celsius_values, 'og', linestyle='-') # first char o is repesting the circle, #g is color green
plt.show()
"""
Explanation: line style or marker
character | description
-----------|-------------------------
'-' | solid line style
'--' | dashed line style
'-.' | dash-dot line style
':' | dotted line style
'.' | point marker
',' | pixel marker
'o' | circle marker
'v' | triangle_down marker
'^' | triangle_up marker
'<' | triangle_left marker
'>' | triangle_right marker
'1' | tri_down marker
'2' | tri_up marker
'3' | tri_left marker
'4' | tri_right marker
's' | square marker
'p' | pentagon marker
'*' | star marker
'h' | hexagon1 marker
'H' | hexagon2 marker
'+' | plus marker
'x' | x marker
'D' | diamond marker
'd' | thin_diamond marker
'|' | vline marker (pipe)
'_' | hline marker
End of explanation
"""
days = list(range(0, 22, 3))
celsius_values = [25.6, 24.1, 26.7, 28.3, 27.5, 30.5, 32.8, 33.1]
plt.plot(days, celsius_values, 'og', linestyle='-')
plt.xlabel("days")
plt.ylabel("celsius values")
plt.title("Temprature Stats")
plt.show()
"""
Explanation: Labels on Axes and Title
End of explanation
"""
days = list(range(1,9))
celsius_min = [19.6, 24.1, 26.7, 28.3, 27.5, 30.5, 32.8, 33.1]
celsius_max = [24.8, 28.9, 31.3, 33.0, 34.9, 35.6, 38.4, 39.2]
plt.xlabel('Day')
plt.ylabel('Degrees Celsius')
plt.plot(days, celsius_min,
days, celsius_min, "oy",
days, celsius_max,
days, celsius_max, "or")
plt.show()
"""
Explanation: We can specify an arbitrary number of x, y, fmt groups in a plot function. In the following example, we use two different lists of y values:
End of explanation
"""
|
chi-hung/notebooks | Docker_Basics.ipynb | mit | instead running an interactive shell, one can initiaa
"""
Explanation: I use this notebook to learn the basic usage of Docker & Vagrant.
28.11.2016
Let's first take a look of the usage of some commands:
docker run
bash
!docker run -it ubuntu /bin/bash
remark: -i:interactive;
-t:tty(teletypewriter, i.e. text-only console)
End of explanation
"""
!docker run --name daemon-dyson -d ubuntu /bin/bash -c "while true; do echo hello world;sleep 1;done"
"""
Explanation: exercise: keep printing hello world before the termination of the daemon:
End of explanation
"""
!docker ps -a
"""
Explanation: remark: -d:daemon; -c:commands
docker ps
End of explanation
"""
!docker logs daemon-dyson
!docker logs -t --tail 10 daemon-dyson
!docker logs -t -f daemon-dyson
"""
Explanation: docker logs
End of explanation
"""
!docker top daemon-dyson
showing the last three processes:
!docker ps -n 3
"""
Explanation: the flag -f will keep refreshing the log file.
End of explanation
"""
!docker inspect daemon-dyson
"""
Explanation: docker attach
(attach to a running container)
bash
!docker attach daemon-dyson
docker inspect
inspect the selected container:
End of explanation
"""
%%bash
docker inspect --format="{{.State.Running}}" daemon-dyson
%%bash
docker inspect --format="{{.NetworkSettings.IPAddress}}" daemon-dyson
"""
Explanation: extract some info from the command docker inspect:
End of explanation
"""
%%bash
docker inspect daemon-dyson |grep -A 100 "NetworkSettings"
"""
Explanation: the -A flag of grep:
(-A: afterwards)
End of explanation
"""
%%bash
# Stop and remove the daemonized container created earlier.
# Fix: the container was named daemon-dyson (see the docker run cell above),
# not daemon-dave.
docker stop daemon-dyson
docker rm daemon-dyson
%%bash
docker ps -a
!docker images
"""
Explanation: stop and remove a daemon:
End of explanation
"""
!docker search ubuntu
!docker search dafu
%%bash
docker search busybox
%%bash
docker pull busybox
!docker history ubuntu
$docker rm $(docker ps -a -q)
for name in ["daemon-dyson","thirsty_kirch","lonely_brahmagupta","hungry_ritchie","modest_bartik","lonely_stallman"]:
!docker rm {name}
!docker ps -a
"""
Explanation: docker search
End of explanation
"""
%%bash
cd static_web/
ls
docker build -t "iiiedu/static_web" .
%%bash
docker images
%%bash
docker history iiiedu/static_web
%%bash
docker run -d -p 80 --name static_web iiiedu/static_web
%%bash
docker ps
"""
Explanation: About Docker Image:
check http://imagelayers.io/ or https://microbadger.com/images/ for some further details of a specific image of layers.
build a docker image
we add some layers to the base image ubuntu:
```bash
Version: 0.0.1
FROM ubuntu
MAINTAINER CHWENG "CHWENG@example.com"
Avoid ERROR: invoke-rc.d: policy-rc.d denied execution of start.
RUN echo "#!/bin/sh\nexit 0" > /usr/sbin/policy-rc.d
RUN apt-get update
RUN apt-get install -y nginx
RUN echo 'Hi, I am in the container' \
>/var/www/html/index.html
#>/usr/share/nginx/html/index.html
CMD [ "nginx", "-g", "daemon off;" ]
EXPOSE 80
```
End of explanation
"""
%%bash
docker-machine env default
"""
Explanation: http://192.168.99.100:32768/
docker exec
use docker exec in order to access to an interactive terminal of the daemonized container (since if we use docker attach, we'll obtain a running terminal, which is not convenient if we'd like to do some extra things interactively.)
bash
docker exec -it static_web /bin/bash
to look for the location of the html file generated by nginx:
bash
root@7df500d5695b:/# find . -name "*.html"
./var/www/html/index.nginx-debian.html
./usr/share/nginx/html/index.html
./usr/share/doc/libfreetype6/ft2faq.html
./usr/share/doc/base-passwd/users-and-groups.html
./usr/share/doc/adduser/examples/adduser.local.conf.examples/skel.other/index.html
bash
docker exec -t static_web /bin/ps
PID TTY TIME CMD
35 ? 00:00:00 ps
use the flag -v(volume) to share files with the container:
bash
docker run -it -P --name web-apache -v ~/apache2/html:/var/www/html iiiedu/apache
(a volume that is going to share data with the container will be created)
exercise: run a nginx server
(the volume is mounted with the read only option)
bash
docker run -d -p 80:80 -v /tmp/www:/usr/share/nginx/html:ro nginx
troubleshooting: env in windows
In windows 7, I met some problem while trying to access docker from BASH (MINGW64). This is fixed after following
https://github.com/docker/docker/issues/22338
End of explanation
"""
%%bash
docker run -d --name java_sock -p 9000:9000 -v $PWD/code:/javatest iiiedu/socket 9000
"""
Explanation: exercise: listen to the socket 9000
End of explanation
"""
%%bash
docker search tomcat
%%bash
docker pull tomcat:8.0
"""
Explanation: tomcat
https://hub.docker.com/_/tomcat/
End of explanation
"""
docker run -d --name tomcat_jsp -p 8080:8080 -v $PWD/code:/usr/local/tomcat/webapps/jsptest iiiedu/tomcat
"""
Explanation: bash
docker run --rm tomcat:8.0 -P --name mytomcat
the flag --rm: remove the container after finishing the execusion
```bash
FROM tomcat:8.0
MAINTAINER iii education
USER root
RUN mkdir /usr/local/tomcat/webapps/jsptest
WORKDIR /usr/local/tomcat/webapps/jsptest
```
End of explanation
"""
!vagrant --help
%%bash
cd ~/
vagrant box add iiiedu/trusty64 ubuntu-trusty64.box
!vagrant box list
%%bash
cd ~/
mkdir demo
cd demo
vagrant init iiiedu/trusty64
%%bash
cat ~/demo/Vagrantfile
%%bash
cd ~/demo
vagrant up
"""
Explanation: 30.11.2016
Vagrant
End of explanation
"""
%%bash
cd ~/hadoop_base
vagrant up
"""
Explanation: workflow:
1. use box add to give vagrant an image (xxx.box) and give it a tag name, e.g. ubuntu/trusty64
2. vagrant init ubuntu/trusty64 (will create the file Vagrantfile)
3. vagrant up (set-up the guest node according to the Vagrantfile and then turn on the guest node)
4. vagrant ssh (login to the guest node)
I was trying to find the folder which contains motd(message of the day) settings. This is done by using the forgotten "find" command:
bash
vagrant@vagrant-ubuntu-trusty-64:~$ sudo find / -type d -name "*motd*"
/etc/update-motd.d
check the file system and the harddisk:
```bash
root@vagrant-ubuntu-trusty-64:/vagrant# fdisk -l
Disk /dev/sda: 42.9 GB, 42949672960 bytes
4 heads, 32 sectors/track, 655360 cylinders, total 83886080 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00021928
Device Boot Start End Blocks Id System
/dev/sda1 * 2048 83886079 41942016 83 Linux
root@vagrant-ubuntu-trusty-64:/vagrant# df -h
Filesystem Size Used Avail Use% Mounted on
udev 241M 12K 241M 1% /dev
tmpfs 49M 344K 49M 1% /run
/dev/sda1 40G 1.4G 37G 4% /
none 4.0K 0 4.0K 0% /sys/fs/cgroup
none 5.0M 0 5.0M 0% /run/lock
none 245M 0 245M 0% /run/shm
none 100M 0 100M 0% /run/user
vagrant 201G 124G 77G 62% /vagrant
```
Indeed, the mounted mount point /vagrant is the directory that is shared in between the host and the guest.
Remark: use hostnamectl to modify the host name when necessary. I am not going to do this.
End of explanation
"""
%%bash
cd ~/hadoop_base
vagrant package --output hadoop_base.box
%%bash
cd ~/hadoop_base
vagrant box add iiiedu/hadoop_base hadoop_base.box
%%bash
vagrant box list
%%bash
cd ~/
mkdir hadoop_node
cd hadoop_node
vagrant init iiiedu/hadoop_base
%%bash
cd ~/hadoop_node
vagrant up
%%bash
cd ~/hadoop_cluster
vagrant package master --output master.box
vagrant package slave1 --output slave1.box
vagrant package slave2 --output slave2.box
vagrant package slave3 --output slave3.box
vagrant package slave4 --output slave4.box
vagrant package slave5 --output slave5.box
"""
Explanation: https://blog.dominodatalab.com/lesser-known-ways-of-using-notebooks/
End of explanation
"""
|
calroc/joypy | docs/Advent of Code 2017 December 3rd.ipynb | gpl-3.0 | k = 4
"""
Explanation: Advent of Code 2017
December 3rd
You come across an experimental new kind of memory stored on an infinite two-dimensional grid.
Each square on the grid is allocated in a spiral pattern starting at a location marked 1 and then counting up while spiraling outward. For example, the first few squares are allocated like this:
17 16 15 14 13
18 5 4 3 12
19 6 1 2 11
20 7 8 9 10
21 22 23---> ...
While this is very space-efficient (no squares are skipped), requested data must be carried back to square 1 (the location of the only access port for this memory system) by programs that can only move up, down, left, or right. They always take the shortest path: the Manhattan Distance between the location of the data and square 1.
For example:
Data from square 1 is carried 0 steps, since it's at the access port.
Data from square 12 is carried 3 steps, such as: down, left, left.
Data from square 23 is carried only 2 steps: up twice.
Data from square 1024 must be carried 31 steps.
How many steps are required to carry the data from the square identified in your puzzle input all the way to the access port?
Analysis
I freely admit that I worked out the program I wanted to write using graph paper and some Python doodles. There's no point in trying to write a Joy program until I'm sure I understand the problem well enough.
The first thing I did was to write a column of numbers from 1 to n (32 as it happens) and next to them the desired output number, to look for patterns directly:
1 0
2 1
3 2
4 1
5 2
6 1
7 2
8 1
9 2
10 3
11 2
12 3
13 4
14 3
15 2
16 3
17 4
18 3
19 2
20 3
21 4
22 3
23 2
24 3
25 4
26 5
27 4
28 3
29 4
30 5
31 6
32 5
There are four groups repeating for a given "rank", then the pattern enlarges and four groups repeat again, etc.
1 2
3 2 3 4
5 4 3 4 5 6
7 6 5 4 5 6 7 8
9 8 7 6 5 6 7 8 9 10
Four of this pyramid interlock to tile the plane extending from the initial "1" square.
2 3 | 4 5 | 6 7 | 8 9
10 11 12 13|14 15 16 17|18 19 20 21|22 23 24 25
And so on.
We can figure out the pattern for a row of the pyramid at a given "rank" $k$:
$2k - 1, 2k - 2, ..., k, k + 1, k + 2, ..., 2k$
or
$k + (k - 1), k + (k - 2), ..., k, k + 1, k + 2, ..., k + k$
This shows that the series consists at each place of $k$ plus some number that begins at $k - 1$, decreases to zero, then increases to $k$. Each row has $2k$ members.
Let's figure out how, given an index into a row, we can calculate the value there. The index will be from 0 to $k - 1$.
Let's look at an example, with $k = 4$:
0 1 2 3 4 5 6 7
7 6 5 4 5 6 7 8
End of explanation
"""
for n in range(2 * k):
print abs(n - k),
"""
Explanation: Subtract $k$ from the index and take the absolute value:
End of explanation
"""
for n in range(2 * k):
print abs(n - (k - 1)),
"""
Explanation: Not quite. Subtract $k - 1$ from the index and take the absolute value:
End of explanation
"""
for n in range(2 * k):
print abs(n - (k - 1)) + k,
"""
Explanation: Great, now add $k$...
End of explanation
"""
def row_value(k, i):
    """Value at offset *i* of the rank-*k* pyramid row.

    The offset wraps at the row length 2k, so any of the four rows of a
    rank can be addressed with a running index.
    """
    wrapped = i % (2 * k)
    return k + abs(wrapped - k + 1)
k = 5
for i in range(2 * k):
print row_value(k, i),
"""
Explanation: So to write a function that can give us the value of a row at a given index:
End of explanation
"""
def rank_and_offset(n):
    """Return (rank, offset-within-row) for spiral square *n*; requires n >= 2."""
    assert n >= 2  # Guard the domain.
    # Shift by two: one for the initial square, one because squares are
    # numbered from 1 rather than 0.
    remaining = n - 2
    rank = 1
    # Each rank consists of four rows of 2k places, i.e. 8k squares total.
    while remaining >= 8 * rank:
        remaining -= 8 * rank
        rank += 1
    return rank, remaining % (2 * rank)
for n in range(2, 51):
print n, rank_and_offset(n)
for n in range(2, 51):
k, i = rank_and_offset(n)
print n, row_value(k, i)
"""
Explanation: (I'm leaving out details of how I figured this all out and just giving the relevant bits. It took a little while to zero in on the aspects of the pattern that were important for the task.)
Finding the rank and offset of a number.
Now that we can compute the desired output value for a given rank and the offset (index) into that rank, we need to determine how to find the rank and offset of a number.
The rank is easy to find by iteratively stripping off the amount already covered by previous ranks until you find the one that brackets the target number. Because each row is $2k$ places and there are $4$ per rank each rank contains $8k$ places. Counting the initial square we have:
$corner_k = 1 + \sum_{n=1}^k 8n$
I'm not mathematically sophisticated enough to turn this directly into a formula (but Sympy is, see below.) I'm going to write a simple Python function to iterate and search:
End of explanation
"""
def row_value(k, i):
    """Manhattan distance stored at offset *i* (0 <= i < 2k) of the rank-*k* row."""
    return k + abs(i - k + 1)

def rank_and_offset(n):
    """Locate spiral square *n* (n >= 2): return (rank, offset within its row)."""
    # Shift by two: one for the initial square, one for 1-based numbering.
    remaining = n - 2
    rank = 1
    # Each rank holds four rows of 2k places -> 8k squares in total.
    while remaining >= 8 * rank:
        remaining -= 8 * rank
        rank += 1
    return rank, remaining % (2 * rank)

def aoc20173(n):
    """Steps (Manhattan distance) from square *n* back to the access port."""
    if n <= 1:
        return 0
    rank, offset = rank_and_offset(n)
    return row_value(rank, offset)
aoc20173(23)
aoc20173(23000)
aoc20173(23000000000000)
"""
Explanation: Putting it all together
End of explanation
"""
from sympy import floor, lambdify, solve, symbols
from sympy import init_printing
init_printing()
k = symbols('k')
"""
Explanation: Sympy to the Rescue
Find the rank for large numbers
Using e.g. Sympy we can find the rank directly by solving for the roots of an equation. For large numbers this will (eventually) be faster than iterating as rank_and_offset() does.
End of explanation
"""
E = 2 + 8 * k * (k + 1) / 2 # For the reason for adding 2 see above.
E
"""
Explanation: Since
$1 + 2 + 3 + ... + N = \frac{N(N + 1)}{2}$
and
$\sum_{n=1}^k 8n = 8(\sum_{n=1}^k n) = 8\frac{k(k + 1)}{2}$
We want:
End of explanation
"""
def rank_of(n):
    """Rank of square *n*: solve E - n == 0 for k, floor the larger root, add 1."""
    larger_root = max(solve(E - n, k))  # quadratic -> two roots; keep the larger
    return floor(larger_root) + 1
"""
Explanation: We can write a function to solve for $k$ given some $n$...
End of explanation
"""
for n in (9, 10, 25, 26, 49, 50):
print n, rank_of(n)
"""
Explanation: First solve() for $E - n = 0$ which has two solutions (because the equation is quadratic so it has two roots) and since we only care about the larger one we use max() to select it. It will generally not be a nice integer (unless $n$ is the number of an end-corner of a rank) so we take the floor() and add 1 to get the integer rank of $n$. (Taking the ceiling() gives off-by-one errors on the rank boundaries. I don't know why. I'm basically like a monkey doing math here.) =-D
It gives correct answers:
End of explanation
"""
%time rank_of(23000000000000) # Compare runtime with rank_and_offset()!
%time rank_and_offset(23000000000000)
"""
Explanation: And it runs much faster (at least for large numbers):
End of explanation
"""
y = symbols('y')
g, f = solve(E - y, k)
"""
Explanation: After finding the rank you would still have to find the actual value of the rank's first corner and subtract it (plus 2) from the number and compute the offset as above and then the final output, but this overhead is partially shared by the other method, and overshadowed by the time it (the other iterative method) would take for really big inputs.
The fun thing to do here would be to graph the actual runtime of both methods against each other to find the trade-off point.
It took me a second to realize I could do this...
Sympy is a symbolic math library, and it supports symbolic manipulation of equations. I can put in $y$ (instead of a value) and ask it to solve for $k$.
End of explanation
"""
g
f
"""
Explanation: The equation is quadratic so there are two roots, we are interested in the greater one...
End of explanation
"""
floor(f) + 1
F = lambdify(y, floor(f) + 1)
for n in (9, 10, 25, 26, 49, 50):
print n, int(F(n))
"""
Explanation: Now we can take the floor(), add 1, and lambdify() the equation to get a Python function that calculates the rank directly.
End of explanation
"""
%time int(F(23000000000000)) # The clear winner.
"""
Explanation: It's pretty fast.
End of explanation
"""
from math import floor as mfloor, sqrt
def mrank_of(n):
    """Rank of spiral square *n*, computed directly from the inverted corner formula.

    Bug fix: the original hard-coded 23000000000000 in the expression and
    ignored its parameter, so it returned the same rank for every input.
    """
    return int(mfloor(sqrt(n - 1) / 2 - 0.5) + 1)
%time mrank_of(23000000000000)
"""
Explanation: Knowing the equation we could write our own function manually, but the speed is no better.
End of explanation
"""
def offset_of(n, k):
    """Offset of square *n* within its rank-*k* pyramid row.

    4k(k-1) is a multiple of the row length 2k, so adding it inside the
    modulo leaves the result unchanged while documenting the rank shift.
    """
    row_length = 2 * k
    shifted = n - 2 + 4 * k * (k - 1)
    return shifted % row_length
"""
Explanation: Given $n$ and a rank, compute the offset.
Now that we have a fast way to get the rank, we still need to use it to compute the offset into a pyramid row.
End of explanation
"""
offset_of(23000000000000, 2397916)
"""
Explanation: (Note the sneaky way the sign changes from $k(k + 1)$ to $k(k - 1)$. This is because we want to subract the $(k - 1)$th rank's total places (its own and those of lesser rank) from our $n$ of rank $k$. Substituting $k - 1$ for $k$ in $k(k + 1)$ gives $(k - 1)(k - 1 + 1)$, which of course simplifies to $k(k - 1)$.)
End of explanation
"""
def rank_of(n):
    """Rank (ring index) of spiral square *n*, via the closed-form corner inverse."""
    return int(mfloor(sqrt(n - 1) / 2 - 0.5) + 1)

def offset_of(n, k):
    """Offset of square *n* within its rank-*k* pyramid row."""
    return (n - 2 + 4 * k * (k - 1)) % (2 * k)

def row_value(k, i):
    """Manhattan distance stored at offset *i* of the rank-*k* row."""
    return abs(i - (k - 1)) + k

def aoc20173(n):
    """Steps required to carry data from square *n* to the access port."""
    if n <= 1:
        # Square 1 is the access port itself.  This guard (present in the
        # earlier iterative version but dropped here) also avoids a
        # ZeroDivisionError: rank_of(1) == 0, so offset_of would take % 0.
        return 0
    k = rank_of(n)
    i = offset_of(n, k)
    return row_value(k, i)
aoc20173(23)
aoc20173(23000)
aoc20173(23000000000000)
%time aoc20173(23000000000000000000000000) # Fast for large values.
"""
Explanation: So, we can compute the rank, then the offset, then the row value.
End of explanation
"""
from notebook_preamble import J, V, define
"""
Explanation: A Joy Version
At this point I feel confident that I can implement a concise version of this code in Joy. ;-)
End of explanation
"""
define('rank_of == -- sqrt 2 / 0.5 - floor ++')
"""
Explanation: rank_of
n rank_of
---------------
k
The translation is straightforward.
int(floor(sqrt(n - 1) / 2 - 0.5) + 1)
rank_of == -- sqrt 2 / 0.5 - floor ++
End of explanation
"""
define('offset_of == dup 2 * [dup -- 4 * * 2 + -] dip %')
"""
Explanation: offset_of
n k offset_of
-------------------
i
(n - 2 + 4 * k * (k - 1)) % (2 * k)
A little tricky...
n k dup 2 *
n k k 2 *
n k k*2 [Q] dip %
n k Q k*2 %
n k dup --
n k k --
n k k-1 4 * * 2 + -
n k*k-1*4 2 + -
n k*k-1*4+2 -
n-k*k-1*4+2
n-k*k-1*4+2 k*2 %
n-k*k-1*4+2%k*2
Ergo:
offset_of == dup 2 * [dup -- 4 * * 2 + -] dip %
End of explanation
"""
define('row_value == over -- - abs +')
"""
Explanation: row_value
k i row_value
-------------------
n
abs(i - (k - 1)) + k
k i over -- - abs +
k i k -- - abs +
k i k-1 - abs +
k i-k-1 abs +
k |i-k-1| +
k+|i-k-1|
End of explanation
"""
define('aoc2017.3 == dup rank_of [offset_of] dupdip swap row_value')
J('23 aoc2017.3')
J('23000 aoc2017.3')
V('23000000000000 aoc2017.3')
"""
Explanation: aoc2017.3
n aoc2017.3
-----------------
m
n dup rank_of
n k [offset_of] dupdip
n k offset_of k
i k swap row_value
k i row_value
m
End of explanation
"""
|
dnc1994/MachineLearning-UW | ml-classification/module-4-linear-classifier-regularization-solution.ipynb | mit | from __future__ import division
import graphlab
"""
Explanation: Logistic Regression with L2 regularization
The goal of this second notebook is to implement your own logistic regression classifier with L2 regularization. You will do the following:
Extract features from Amazon product reviews.
Convert an SFrame into a NumPy array.
Write a function to compute the derivative of log likelihood function with an L2 penalty with respect to a single coefficient.
Implement gradient ascent with an L2 penalty.
Empirically explore how the L2 penalty can ameliorate overfitting.
Fire up GraphLab Create
Make sure you have the latest version of GraphLab Create. Upgrade by
pip install graphlab-create --upgrade
See this page for detailed instructions on upgrading.
End of explanation
"""
products = graphlab.SFrame('amazon_baby_subset.gl/')
"""
Explanation: Load and process review dataset
For this assignment, we will use the same subset of the Amazon product review dataset that we used in Module 3 assignment. The subset was chosen to contain similar numbers of positive and negative reviews, as the original dataset consisted of mostly positive reviews.
End of explanation
"""
# The same feature processing (same as the previous assignments)
# ---------------------------------------------------------------
import json
with open('important_words.json', 'r') as f: # Reads the list of most frequent words
important_words = json.load(f)
important_words = [str(s) for s in important_words]
def remove_punctuation(text):
import string
return text.translate(None, string.punctuation)
# Remove punctuation.
products['review_clean'] = products['review'].apply(remove_punctuation)
# Split out the words into individual columns
for word in important_words:
products[word] = products['review_clean'].apply(lambda s : s.split().count(word))
"""
Explanation: Just like we did previously, we will work with a hand-curated list of important words extracted from the review data. We will also perform 2 simple data transformations:
Remove punctuation using Python's built-in string functionality.
Compute word counts (only for the important_words)
Refer to Module 3 assignment for more details.
End of explanation
"""
products
"""
Explanation: Now, let us take a look at what the dataset looks like (Note: This may take a few minutes).
End of explanation
"""
train_data, validation_data = products.random_split(.8, seed=2)
print 'Training set : %d data points' % len(train_data)
print 'Validation set : %d data points' % len(validation_data)
"""
Explanation: Train-Validation split
We split the data into a train-validation split with 80% of the data in the training set and 20% of the data in the validation set. We use seed=2 so that everyone gets the same result.
Note: In previous assignments, we have called this a train-test split. However, the portion of data that we don't train on will be used to help select model parameters. Thus, this portion of data should be called a validation set. Recall that examining performance of various potential models (i.e. models with different parameters) should be on a validation set, while evaluation of selected model should always be on a test set.
End of explanation
"""
import numpy as np
def get_numpy_data(data_sframe, features, label):
    """Extract a feature matrix and a label array from *data_sframe*.

    A constant 'intercept' column of 1's is prepended to the requested
    *features*; the matrix and the *label* column are returned as NumPy
    arrays. NOTE: mutates *data_sframe* by adding the 'intercept' column.
    """
    data_sframe['intercept'] = 1
    selected_columns = ['intercept'] + features
    feature_matrix = data_sframe[selected_columns].to_numpy()
    label_array = data_sframe[label].to_numpy()
    return (feature_matrix, label_array)
"""
Explanation: Convert SFrame to NumPy array
Just like in the second assignment of the previous module, we provide you with a function that extracts columns from an SFrame and converts them into a NumPy array. Two arrays are returned: one representing features and another representing class labels.
Note: The feature matrix includes an additional column 'intercept' filled with 1's to take account of the intercept term.
End of explanation
"""
feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment')
feature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment')
"""
Explanation: We convert both the training and validation sets into NumPy arrays.
Warning: This may take a few minutes.
End of explanation
"""
'''
Produces a probabilistic estimate for P(y_i = +1 | x_i, w).
The estimate ranges between 0 and 1.
'''
def predict_probability(feature_matrix, coefficients):
    """Return P(y_i = +1 | x_i, w) for every row of *feature_matrix*.

    Applies the logistic (sigmoid) link function to the linear scores
    h(x_i)^T w; each returned probability lies strictly between 0 and 1.
    """
    scores = feature_matrix.dot(coefficients)
    return 1.0 / (1.0 + np.exp(-scores))
"""
Explanation: Building on logistic regression with no L2 penalty assignment
Let us now build on Module 3 assignment. Recall from lecture that the link function for logistic regression can be defined as:
$$
P(y_i = +1 | \mathbf{x}_i,\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))},
$$
where the feature vector $h(\mathbf{x}_i)$ is given by the word counts of important_words in the review $\mathbf{x}_i$.
We will use the same code as in this past assignment to make probability predictions since this part is not affected by the L2 penalty. (Only the way in which the coefficients are learned is affected by the addition of a regularization term.)
End of explanation
"""
def feature_derivative_with_L2(errors, feature, coefficient, l2_penalty, feature_is_constant):
    """Partial derivative of the L2-penalised log likelihood w.r.t. one coefficient.

    errors  -- 1[y_i = +1] - P(y_i = +1 | x_i, w) for every data point.
    feature -- the feature column h_j(x_i) matching this coefficient.
    The intercept (feature_is_constant=True) is deliberately left unregularised.
    """
    derivative = np.dot(errors, feature)
    if feature_is_constant:
        return derivative
    # The penalty pulls the coefficient towards zero: -2 * lambda * w_j.
    return derivative - 2 * l2_penalty * coefficient
"""
Explanation: Adding L2 penalty
Let us now work on extending logistic regression with L2 regularization. As discussed in the lectures, the L2 regularization is particularly useful in preventing overfitting. In this assignment, we will explore L2 regularization in detail.
Recall from lecture and the previous assignment that for logistic regression without an L2 penalty, the derivative of the log likelihood function is:
$$
\frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right)
$$
Adding L2 penalty to the derivative
It takes only a small modification to add a L2 penalty. All terms indicated in red refer to terms that were added due to an L2 penalty.
Recall from the lecture that the link function is still the sigmoid:
$$
P(y_i = +1 | \mathbf{x}_i,\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))},
$$
We add the L2 penalty term to the per-coefficient derivative of log likelihood:
$$
\frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right) \color{red}{-2\lambda w_j }
$$
The per-coefficient derivative for logistic regression with an L2 penalty is as follows:
$$
\frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right) \color{red}{-2\lambda w_j }
$$
and for the intercept term, we have
$$
\frac{\partial\ell}{\partial w_0} = \sum{i=1}^N h_0(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right)
$$
Note: As we did in the Regression course, we do not apply the L2 penalty on the intercept. A large intercept does not necessarily indicate overfitting because the intercept is not associated with any particular feature.
Write a function that computes the derivative of log likelihood with respect to a single coefficient $w_j$. Unlike its counterpart in the last assignment, the function accepts five arguments:
* errors vector containing $(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w}))$ for all $i$
* feature vector containing $h_j(\mathbf{x}_i)$ for all $i$
* coefficient containing the current value of coefficient $w_j$.
* l2_penalty representing the L2 penalty constant $\lambda$
* feature_is_constant telling whether the $j$-th feature is constant or not.
End of explanation
"""
def compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty):
    """Evaluate the L2-penalised log likelihood ll(w) in its numerically
    stable form. The penalty deliberately skips coefficients[0] (intercept).
    """
    indicator = (sentiment == +1)
    scores = np.dot(feature_matrix, coefficients)
    data_term = np.sum((indicator - 1) * scores - np.log(1. + np.exp(-scores)))
    penalty_term = l2_penalty * np.sum(coefficients[1:] ** 2)
    return data_term - penalty_term
"""
Explanation: Quiz question: In the code above, was the intercept term regularized?
To verify the correctness of the gradient ascent algorithm, we provide a function for computing log likelihood (which we recall from the last assignment was a topic detailed in an advanced optional video, and used here for its numerical stability).
$$\ell\ell(\mathbf{w}) = \sum_{i=1}^N \Big( (\mathbf{1}[y_i = +1] - 1)\mathbf{w}^T h(\mathbf{x}_i) - \ln\left(1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))\right) \Big) \color{red}{-\lambda\|\mathbf{w}\|_2^2} $$
End of explanation
"""
def logistic_regression_with_L2(feature_matrix, sentiment, initial_coefficients, step_size, l2_penalty, max_iter):
    """Fit L2-regularised logistic regression by batch gradient ascent.

    Runs max_iter full-gradient passes starting from initial_coefficients
    and returns the final coefficient vector.  Periodically prints the
    penalised log likelihood, which should increase over iterations.
    NOTE: Python 2 code (print statement, xrange).
    """
    # NOTE(review): the in-place += below assumes initial_coefficients has a
    # float dtype (np.zeros does) — an int array would truncate updates.
    coefficients = np.array(initial_coefficients) # make sure it's a numpy array
    for itr in xrange(max_iter):
        # Predict P(y_i = +1|x_i,w) using your predict_probability() function
        ## YOUR CODE HERE
        predictions = predict_probability(feature_matrix, coefficients)
        # Compute indicator value for (y_i = +1)
        indicator = (sentiment==+1)
        # Compute the errors as indicator - predictions
        errors = indicator - predictions
        # Coefficient-at-a-time ascent step; j == 0 is the (unregularised)
        # intercept, so the derivative helper is told to skip the penalty.
        for j in xrange(len(coefficients)): # loop over each coefficient
            is_intercept = (j == 0)
            # Recall that feature_matrix[:,j] is the feature column associated with coefficients[j].
            # Compute the derivative for coefficients[j]. Save it in a variable called derivative
            ## YOUR CODE HERE
            derivative = feature_derivative_with_L2(errors, feature_matrix[:,j], coefficients[j], l2_penalty, is_intercept)
            # add the step size times the derivative to the current coefficient
            ## YOUR CODE HERE
            coefficients[j] += step_size * derivative
        # Checking whether log likelihood is increasing
        # Log on a schedule that thins out as itr grows: every iteration up
        # to 15, then every 10th, 100th, 1000th and 10000th iteration.
        if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \
                or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:
            lp = compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty)
            print 'iteration %*d: log likelihood of observed labels = %.8f' % \
                (int(np.ceil(np.log10(max_iter))), itr, lp)
    return coefficients
"""
Explanation: Quiz question: Does the term with L2 regularization increase or decrease $\ell\ell(\mathbf{w})$?
The logistic regression function looks almost like the one in the last assignment, with a minor modification to account for the L2 penalty. Fill in the code below to complete this modification.
End of explanation
"""
# Train six models that differ only in the strength of the L2 penalty
# (lambda = 0, 4, 10, 1e2, 1e3, 1e5); the data, step size and iteration
# budget are held fixed so the resulting coefficients are comparable.
# NOTE(review): 194 coefficients presumably = 1 intercept + 193
# important_words — confirm against len(important_words).
# run with L2 = 0
coefficients_0_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                     initial_coefficients=np.zeros(194),
                                                     step_size=5e-6, l2_penalty=0, max_iter=501)
# run with L2 = 4
coefficients_4_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                     initial_coefficients=np.zeros(194),
                                                     step_size=5e-6, l2_penalty=4, max_iter=501)
# run with L2 = 10
coefficients_10_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                      initial_coefficients=np.zeros(194),
                                                      step_size=5e-6, l2_penalty=10, max_iter=501)
# run with L2 = 1e2
coefficients_1e2_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                       initial_coefficients=np.zeros(194),
                                                       step_size=5e-6, l2_penalty=1e2, max_iter=501)
# run with L2 = 1e3
coefficients_1e3_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                       initial_coefficients=np.zeros(194),
                                                       step_size=5e-6, l2_penalty=1e3, max_iter=501)
# run with L2 = 1e5
coefficients_1e5_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                       initial_coefficients=np.zeros(194),
                                                       step_size=5e-6, l2_penalty=1e5, max_iter=501)
"""
Explanation: Explore effects of L2 regularization
Now that we have written up all the pieces needed for regularized logistic regression, let's explore the benefits of using L2 regularization in analyzing sentiment for product reviews. As iterations pass, the log likelihood should increase.
Below, we train models with increasing amounts of regularization, starting with no L2 penalty, which is equivalent to our previous logistic regression implementation.
End of explanation
"""
table = graphlab.SFrame({'word': ['(intercept)'] + important_words})
def add_coefficients_to_table(coefficients, column_name):
    """Attach *coefficients* to the module-level `table` as a new column
    named *column_name*, and return the (mutated) table for display.
    """
    table[column_name] = coefficients
    return table
"""
Explanation: Compare coefficients
We now compare the coefficients for each of the models that were trained above. We will create a table of features and learned coefficients associated with each of the different L2 penalty values.
Below is a simple helper function that will help us create this table.
End of explanation
"""
add_coefficients_to_table(coefficients_0_penalty, 'coefficients [L2=0]')
add_coefficients_to_table(coefficients_4_penalty, 'coefficients [L2=4]')
add_coefficients_to_table(coefficients_10_penalty, 'coefficients [L2=10]')
add_coefficients_to_table(coefficients_1e2_penalty, 'coefficients [L2=1e2]')
add_coefficients_to_table(coefficients_1e3_penalty, 'coefficients [L2=1e3]')
add_coefficients_to_table(coefficients_1e5_penalty, 'coefficients [L2=1e5]')
"""
Explanation: Now, let's run the function add_coefficients_to_table for each of the L2 penalty strengths.
End of explanation
"""
# Rank words by their unregularised (L2=0) coefficient: the 5 largest
# positive and 5 most negative coefficients identify the strongest
# sentiment words learned by the unpenalised model.
subtable = table[['word', 'coefficients [L2=0]']]
ptable = sorted(subtable, key=lambda x: x['coefficients [L2=0]'], reverse=True)[:5]
ntable = sorted(subtable, key=lambda x: x['coefficients [L2=0]'], reverse=False)[:5]
positive_words = [w['word'] for w in ptable]
print positive_words
negative_words = [w['word'] for w in ntable]
print negative_words
"""
Explanation: Using the coefficients trained with L2 penalty 0, find the 5 most positive words (with largest positive coefficients). Save them to positive_words. Similarly, find the 5 most negative words (with largest negative coefficients) and save them to negative_words.
Quiz Question. Which of the following is not listed in either positive_words or negative_words?
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = 10, 6
def make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list):
    """Plot the coefficient path (coefficient value vs L2 penalty, log-x)
    for the selected positive (reds) and negative (blues) words.

    NOTE: Python 2 code (xrange); relies on the true division imported at
    the top of the file for the colour-map fractions below.
    """
    cmap_positive = plt.get_cmap('Reds')
    cmap_negative = plt.get_cmap('Blues')
    xx = l2_penalty_list
    # Dashed zero line as a visual reference for coefficients shrinking to 0.
    plt.plot(xx, [0.]*len(xx), '--', lw=1, color='k')
    table_positive_words = table.filter_by(column_name='word', values=positive_words)
    table_negative_words = table.filter_by(column_name='word', values=negative_words)
    # Drop the word column so each remaining row is purely numeric.
    del table_positive_words['word']
    del table_negative_words['word']
    for i in xrange(len(positive_words)):
        # Pick progressively stronger shades from the colour map.
        color = cmap_positive(0.8*((i+1)/(len(positive_words)*1.2)+0.15))
        plt.plot(xx, table_positive_words[i:i+1].to_numpy().flatten(),
                 '-', label=positive_words[i], linewidth=4.0, color=color)
    for i in xrange(len(negative_words)):
        color = cmap_negative(0.8*((i+1)/(len(negative_words)*1.2)+0.15))
        plt.plot(xx, table_negative_words[i:i+1].to_numpy().flatten(),
                 '-', label=negative_words[i], linewidth=4.0, color=color)
    plt.legend(loc='best', ncol=3, prop={'size':16}, columnspacing=0.5)
    plt.axis([1, 1e5, -1, 2])
    plt.title('Coefficient path')
    plt.xlabel('L2 penalty ($\lambda$)')
    plt.ylabel('Coefficient value')
    plt.xscale('log')
    plt.rcParams.update({'font.size': 18})
    plt.tight_layout()
"""
Explanation: Let us observe the effect of increasing L2 penalty on the 10 words just selected. We provide you with a utility function to plot the coefficient path.
End of explanation
"""
make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list=[0, 4, 10, 1e2, 1e3, 1e5])
"""
Explanation: Run the following cell to generate the plot. Use the plot to answer the following quiz question.
End of explanation
"""
def get_classification_accuracy(feature_matrix, sentiment, coefficients):
    """Fraction of rows whose predicted class matches *sentiment*.

    A row is classified +1 when its score h(x_i)^T w is strictly positive
    and -1 otherwise (a score of exactly zero maps to -1).
    """
    scores = np.dot(feature_matrix, coefficients)
    predictions = np.where(scores > 0, 1., -1.)
    return (predictions == sentiment).sum() / len(feature_matrix)
"""
Explanation: Quiz Question: (True/False) All coefficients consistently get smaller in size as the L2 penalty is increased.
Quiz Question: (True/False) The relative order of coefficients is preserved as the L2 penalty is increased. (For example, if the coefficient for 'cat' was more positive than that for 'dog', this remains true as the L2 penalty increases.)
Measuring accuracy
Now, let us compute the accuracy of the classifier model. Recall that the accuracy is given by
$$
\mbox{accuracy} = \frac{\mbox{# correctly classified data points}}{\mbox{# total data points}}
$$
Recall from lecture that the class prediction is calculated using
$$
\hat{y}_i =
\left{
\begin{array}{ll}
+1 & h(\mathbf{x}_i)^T\mathbf{w} > 0 \
-1 & h(\mathbf{x}_i)^T\mathbf{w} \leq 0 \
\end{array}
\right.
$$
Note: It is important to know that the model prediction code doesn't change even with the addition of an L2 penalty. The only thing that changes is the estimated coefficients used in this prediction.
Based on the above, we will use the same code that was used in Module 3 assignment.
End of explanation
"""
# Compute training- and validation-set accuracy for each of the six
# models, keyed by the L2 penalty strength, then print a side-by-side
# summary sorted by penalty. NOTE: Python 2 print statements.
train_accuracy = {}
train_accuracy[0] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_0_penalty)
train_accuracy[4] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_4_penalty)
train_accuracy[10] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_10_penalty)
train_accuracy[1e2] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e2_penalty)
train_accuracy[1e3] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e3_penalty)
train_accuracy[1e5] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e5_penalty)
validation_accuracy = {}
validation_accuracy[0] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_0_penalty)
validation_accuracy[4] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_4_penalty)
validation_accuracy[10] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_10_penalty)
validation_accuracy[1e2] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e2_penalty)
validation_accuracy[1e3] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e3_penalty)
validation_accuracy[1e5] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e5_penalty)
# Build a simple report
for key in sorted(validation_accuracy.keys()):
    print "L2 penalty = %g" % key
    print "train accuracy = %s, validation_accuracy = %s" % (train_accuracy[key], validation_accuracy[key])
    print "--------------------------------------------------------------------------------"
"""
Explanation: Below, we compare the accuracy on the training data and validation data for all the models that were trained in this assignment. We first calculate the accuracy values and then build a simple report summarizing the performance for the various models.
End of explanation
"""
|
psychemedia/parlihacks | notebooks/RegisterOfInterests.ipynb | mit | url='http://downloads.membersinterests.org.uk/register/170707.zip'
!mkdir -p tmp/
!mkdir -p data/
!wget {url} -O tmp/temp.zip; unzip tmp/temp.zip -d data/ ; rm tmp/temp.zip
#Preview the data
!head -n 3 data/170707.csv
#View data in datatable
import pandas as pd
df=pd.read_csv('data/170707.csv',header=None)
df.columns=['Name','Constituency','Party','URL','Item']
df.head()
"""
Explanation: UK MPs - Register of Interests - Quick Sketch
Couple of hours hack around register of interests data...
Get Data
Seems we can find some from http://www.membersinterests.org.uk/.
End of explanation
"""
#!pip3 install spacy
#!python3 -m spacy download en
from spacy.en import English
parser = English()
def entities(example, show=False):
    """Run the spaCy parser over *example* and print a dict mapping each
    named-entity string to its list of (label_id, label_name) tags.

    Relies on the module-level `parser` (spacy.en.English instance).
    Prints its result rather than returning it.
    """
    if show:
        print(example)
    parsedEx = parser(example)
    print("-------------- entities only ---------------")
    # if you just want the entities and nothing else, you can access the parsed example's "ents" property like this:
    ents = list(parsedEx.ents)
    tags = {}
    for entity in ents:
        term = ' '.join(t.orth_ for t in entity)
        # BUG FIX: the original tested `' '.join(term) not in tags`, which
        # space-joins the *characters* of `term` and therefore never matches
        # a stored key — repeated terms overwrote their earlier label lists
        # instead of accumulating them.
        if term not in tags:
            tags[term] = [(entity.label, entity.label_)]
        else:
            tags[term].append((entity.label, entity.label_))
    print(tags)
#Get a single register line item to play with
txt=df.iloc[0]['Item']
txt
entities(txt, True)
"""
Explanation: Simple entity extraction
Quick pass at trying to extract entities locally using simple natural language extractor.
This is not necessarily that sophisticated - but it's a start...
End of explanation
"""
import requests
ocrecURL='http://opencorporates.com/reconcile/gb'
rq=requests.get(ocrecURL,params={'query':'Guardian News & Media'})
rq.json()
"""
Explanation: We might then try to reconcile things classed as an ORG using something like OpenCorporates API.
End of explanation
"""
CALAIS_KEY=""
import requests
import json
def calais(text, calaisKey=CALAIS_KEY):
    """POST *text* to the Thomson Reuters OpenCalais tagging API and
    return the decoded JSON response.
    """
    endpoint = 'https://api.thomsonreuters.com/permid/calais'
    request_headers = {
        'X-AG-Access-Token': calaisKey,
        'Content-Type': 'text/raw',
        'outputformat': 'application/json',
    }
    return requests.post(endpoint, files={'file': text}, headers=request_headers, timeout=80).json()
def cleaner(txt):
    """Insert a sentence break before every 'Address of' marker so that
    downstream entity taggers see the donor name and address as separate
    sentences.
    """
    return txt.replace('Address of', '. Address of')
oc=calais( cleaner(txt) )
def ocQuickView(oc):
    """Condense a raw OpenCalais response into {typeGroup: [record, ...]}.

    Only the 'entities', 'relations', 'socialTag' and 'topics' type groups
    are kept; each record carries the exact-match instance strings plus the
    name, address and _type fields when present in the source entry.
    """
    wanted_groups = ('entities', 'relations', 'socialTag', 'topics')
    summary = {}
    for entry in oc.values():
        if '_typeGroup' not in entry:
            continue
        group = entry['_typeGroup']
        if group not in wanted_groups:
            continue
        record = {}
        if 'instances' in entry:
            record['instances'] = [inst['exact'] for inst in entry['instances'] if 'exact' in inst]
        for field in ('name', 'address', '_type'):
            if field in entry:
                record[field] = entry[field]
        summary.setdefault(group, []).append(record)
    return summary
ocQuickView(oc)
ix=155
txt=cleaner(df.iloc[ix]['Item'])
print('{}\n---\n{}'.format(txt, ocQuickView(calais(txt))))
ix=299
txt=cleaner(df.iloc[ix]['Item'])
print('{}\n---\n{}'.format(txt, ocQuickView(calais(txt))))
ix=863
txt=cleaner(df.iloc[ix]['Item'])
print('{}\n---\n{}'.format(txt, ocQuickView(calais(txt))))
"""
Explanation: Third Party Taggers
Examples of using third party taggers.
Thomson Reuters OpenCalais
End of explanation
"""
txt="Name of donor: Nael FarargyAddress of donor: privateAmount of donation or nature and value of donation in kind: £20,000 to hire a part time member of staff and meet office and staff expensesDate received: 12 April 2017Date accepted: 12 April 2017Donor status: individual(Registered 18 April 2017) "
txt
"""
Explanation: Observations
The free text has items that can be parsed out - e.g. Name of donor:, Amount of donation or nature and value of donation in kind:, etc.
End of explanation
"""
# Named-group regex that captures the conventional donation fields of a
# register entry: donor name/address, amount, received/accepted dates and
# donor status. Greedy `.*` groups work because the field markers appear
# in this fixed order in conforming entries.
extractor1='Name of donor:(?P<name>.*)Address of donor:(?P<address>.*)Amount of donation or nature and value of donation in kind:(?P<amount>.*)Date received:(?P<rxd>.*)Date accepted:(?P<accptd>.*)Donor status(?P<status>.*)'
import re
r=re.compile(extractor1)
r.match(txt).groupdict()
#Looking at the response values, we could catch for whitespace in the regex or do a cleaning pass to strip whitespace
"""
Explanation: Define a regular expression to pull out the data in structured form if the text conforms to a conventional format.
End of explanation
"""
|
psyllost/02819 | Question_Answering_System_using_BERT_+_SQuAD_2_0_on_Colab_TPU.ipynb | apache-2.0 | !git clone https://github.com/google-research/bert.git
"""
Explanation: <a href="https://colab.research.google.com/github/psyllost/02819/blob/master/Question_Answering_System_using_BERT_%2B_SQuAD_2_0_on_Colab_TPU.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
This colab file is created by Pragnakalp Techlabs.
You can copy this colab in your drive and then execute the command in given order. For more details check our blog NLP Tutorial: Setup Question Answering System using BERT + SQuAD on Colab TPU
Check our BERT based Question and Answering system demo for English and other 8 languages.
You can also purchase the Demo of our BERT based QnA system including fine-tuned models.
BERT Fine-tuning and Prediction on SQUAD 2.0 using Cloud TPU!
Overview
BERT, or Bidirectional Embedding Representations from Transformers, is a new method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. The academic paper can be found here: https://arxiv.org/abs/1810.04805.
SQuAD Stanford Question Answering Dataset is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.
This colab file shows how to fine-tune BERT on SQuAD dataset, and then how to perform the prediction. Using this you can create your own Question Answering System.
Prerequisite : You will need a GCP (Google Compute Engine) account and a GCS (Google Cloud Storage) bucket to run this colab file.
Please follow the Google Cloud for how to create GCP account and GCS bucket. You have $300 free credit to get started with any GCP product. You can learn more about it at https://cloud.google.com/tpu/docs/setup-gcp-account
You can create your GCS bucket from here http://console.cloud.google.com/storage.
Change Runtime to TPU
On the main menu, click on Runtime and select Change runtime type. Set "TPU" as the hardware accelerator.
Clone the BERT github repository
First Step is to Clone the BERT github repository, below is the way by which you can clone the repo from github.
End of explanation
"""
ls -l
cd bert
"""
Explanation: Confirm that BERT repo is cloned properly.
"ls -l" is used for long listing, if BERT repo is cloned properly you can see the BERT folder in current directory.
End of explanation
"""
ls -l
"""
Explanation: BERT repository files
use ls -l to check the content inside BERT folder, you can see all files related to BERT.
End of explanation
"""
!wget https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip
# Unzip the pretrained model
!unzip uncased_L-24_H-1024_A-16.zip
"""
Explanation: Download the BERT PRETRAINED MODEL
BERT Pretrained Model List :
BERT-Large, Uncased (Whole Word Masking) : 24-layer, 1024-hidden, 16-heads, 340M parameters
BERT-Large, Cased (Whole Word Masking) : 24-layer, 1024-hidden, 16-heads, 340M parameters
BERT-Base, Uncased : 12-layer, 768-hidden, 12-heads, 110M parameters
BERT-Large, Uncased : 24-layer, 1024-hidden, 16-heads, 340M parameters
BERT-Base, Cased: 12-layer, 768-hidden, 12-heads , 110M parameters
BERT-Large, Cased : 24-layer, 1024-hidden, 16-heads, 340M parameters
BERT-Base, Multilingual Cased (New, recommended) : 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
BERT-Base, Multilingual Uncased (Orig, not recommended) (Not recommended, use Multilingual Cased instead) : 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
BERT-Base, Chinese : Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M parameters
BERT has release BERT-Base and BERT-Large models. Uncased means that the text has been lowercased before WordPiece tokenization, e.g., John Smith becomes john smith, whereas Cased means that the true case and accent markers are preserved.
When using a cased model, make sure to pass --do_lower=False at the time of training.
You can download any model of your choice. We have used BERT-Large-Uncased Model.
End of explanation
"""
#Download the SQUAD train and dev dataset
!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json
!wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json
"""
Explanation: Download the SQUAD 2.0 Dataset
End of explanation
"""
import datetime
import json
import os
import pprint
import random
import string
import sys
import tensorflow as tf
assert 'COLAB_TPU_ADDR' in os.environ, 'ERROR: Not connected to a TPU runtime; please see the first cell in this notebook for instructions!'
TPU_ADDRESS = 'grpc://' + os.environ['COLAB_TPU_ADDR']
print('TPU address is => ', TPU_ADDRESS)
from google.colab import auth
auth.authenticate_user()
with tf.Session(TPU_ADDRESS) as session:
print('TPU devices:')
pprint.pprint(session.list_devices())
# Upload credentials to TPU.
with open('/content/adc.json', 'r') as f:
auth_info = json.load(f)
tf.contrib.cloud.configure_gcs(session, credentials=auth_info)
# Now credentials are set for all future sessions on this TPU.
"""
Explanation: Set up your TPU environment
Verify that you are connected to a TPU device
You will get know your TPU Address that is used at time of fine-tuning
Perform Google Authentication to access your bucket
Upload your credentials to TPU to access your GCS bucket
End of explanation
"""
BUCKET = 'bertnlpdemo' #@param {type:"string"}
assert BUCKET, '*** Must specify an existing GCS bucket name ***'
output_dir_name = 'bert_output' #@param {type:"string"}
BUCKET_NAME = 'gs://{}'.format(BUCKET)
OUTPUT_DIR = 'gs://{}/{}'.format(BUCKET,output_dir_name)
tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
"""
Explanation: Create output directory
Need to create a output directory at GCS (Google Cloud Storage) bucket, where you will get your fine_tuned model after training completion. For that you need to provide your BUCKET name and OUPUT DIRECTORY name.
Also need to move Pre-trained Model at GCS (Google Cloud Storage) bucket, as Local File System is not Supported on TPU. If you don't move your pretrained model to TPU you may face an error.
End of explanation
"""
!gsutil mv /content/bert/uncased_L-24_H-1024_A-16 $BUCKET_NAME
"""
Explanation: Move Pretrained Model to GCS Bucket
Need to move Pre-trained Model at GCS (Google Cloud Storage) bucket, as Local File System is not Supported on TPU. If you don't move your pretrained model to TPU you may face the error.
The gsutil mv command allows you to move data between your local file system and the cloud, move data within the cloud, and move data between cloud storage providers.
End of explanation
"""
!python run_squad.py \
--vocab_file=$BUCKET_NAME/uncased_L-24_H-1024_A-16/vocab.txt \
--bert_config_file=$BUCKET_NAME/uncased_L-24_H-1024_A-16/bert_config.json \
--init_checkpoint=$BUCKET_NAME/uncased_L-24_H-1024_A-16/bert_model.ckpt \
--do_train=True \
--train_file=train-v2.0.json \
--do_predict=True \
--predict_file=dev-v2.0.json \
--train_batch_size=24 \
--learning_rate=3e-5 \
--num_train_epochs=2.0 \
--use_tpu=True \
--tpu_name=grpc://10.1.118.82:8470 \
--max_seq_length=384 \
--doc_stride=128 \
--version_2_with_negative=True \
--output_dir=$OUTPUT_DIR
"""
Explanation: Training
Below is the command to run the training. To run the training on TPU you need to make sure about below Hyperparameter, that is tpu must be true and provide the tpu_address that we have find out above.
--use_tpu=True
--tpu_name=YOUR_TPU_ADDRESS
End of explanation
"""
!touch input_file.json
%%writefile input_file.json
{
"version": "v2.0",
"data": [
{
"title": "your_title",
"paragraphs": [
{
"qas": [
{
"question": "Who is current CEO?",
"id": "56ddde6b9a695914005b9628",
"is_impossible": ""
},
{
"question": "Who founded google?",
"id": "56ddde6b9a695914005b9629",
"is_impossible": ""
},
{
"question": "when did IPO take place?",
"id": "56ddde6b9a695914005b962a",
"is_impossible": ""
}
],
"context": "Google was founded in 1998 by Larry Page and Sergey Brin while they were Ph.D. students at Stanford University in California. Together they own about 14 percent of its shares and control 56 percent of the stockholder voting power through supervoting stock. They incorporated Google as a privately held company on September 4, 1998. An initial public offering (IPO) took place on August 19, 2004, and Google moved to its headquarters in Mountain View, California, nicknamed the Googleplex. In August 2015, Google announced plans to reorganize its various interests as a conglomerate called Alphabet Inc. Google is Alphabet's leading subsidiary and will continue to be the umbrella company for Alphabet's Internet interests. Sundar Pichai was appointed CEO of Google, replacing Larry Page who became the CEO of Alphabet."
}
]
}
]
}
"""
Explanation: Create Testing File
We are creating input_file.json as a blank json file and then writing the data in SQUAD format in the file.
touch is used to create a file
%%writefile is used to write a file in the colab
You can pass your own questions and context in the below file.
End of explanation
"""
!python run_squad.py \
--vocab_file=$BUCKET_NAME/uncased_L-24_H-1024_A-16/vocab.txt \
--bert_config_file=$BUCKET_NAME/uncased_L-24_H-1024_A-16/bert_config.json \
--init_checkpoint=$OUTPUT_DIR/model.ckpt-10859 \
--do_train=False \
--max_query_length=30 \
--do_predict=True \
--predict_file=input_file.json \
--predict_batch_size=8 \
--n_best_size=3 \
--max_seq_length=384 \
--doc_stride=128 \
--output_dir=output/
"""
Explanation: Prediction
Below is the command to perform your own custom prediction, that is you can change the input_file.json by providing your paragraph and questions after then execute the below command.
End of explanation
"""
|
volodymyrss/3ML | docs/notebooks/Minimization_tutorial.ipynb | bsd-3-clause | from threeML import *
import matplotlib.pyplot as plt
%matplotlib inline
from threeML.minimizer.tutorial_material import *
"""
Explanation: Minimization
When using a Maximum Likelihood analysis we want to find the maximum of the likelihood $L(\vec{\theta})$ given one or more datasets (i.e., plugin instances) and one model containing one or more sources with free parameters $\vec{\theta}$. Most of the available algorithms for function optimization find the minimum, not the maximum, of a function. Also, since the likelihood function is usually the product of many probabilities, bounded to be $0 < p < 1$, $L(\vec{\theta})$ tend to be very small. Hence, it is much more tractable numerically to deal with the logarithm of the likelihood. Therefore, instead of finding the maximum of the likelihood $L$, we find the minimum of the $-\log{L(\vec{\theta})}$ function. Of course, the values of $\vec{\theta}$ minimizing $-\log{L}$ are the same that maximize $L$, i.e.:
argmax${\vec{\theta}}~\left( L(\vec{\theta}) \right)$ = argmin${\vec{\theta}}~\left(-\log{L(\vec{\theta})}\right)$.
Various minimizers are available in 3ML. We can divide them in two groups: local minimizers and global minimizers.
Local minimizers
Most of the existing optimization algorithms are local minimizers (MINUIT, Levenberg–Marquardt, Netwton...).
A local minimizer starts from the current values for the free parameters $\vec{\theta}$ and try to reach the closest minimum of a function $f(\vec{\theta})$ (in 3ML this is usually the $-\log{L}$).
Many minimizers are based on the idea of gradient descent, i.e., they compute the local gradient of $f(\vec{\theta})$ and follow the function along the direction of steepest descent until the minimum. There are however also gradient-free algorithms, like for example COBYLA. While going into the details of how each algorithm works is beyond the scope of this tutorial, we illustrate here an example by using the Minuit algorithm.
Let's start by importing what we need in the following:
End of explanation
"""
# This returns a JointLikelihood object with a simple likelihood function,
# and the corresponding Model instance. These objects are what you will have
# in a typical 3ML analysis. The Model contains one point source, named "test",
# with a spectrum called "simple"
jl, model = get_joint_likelihood_object_simple_likelihood()
# Let's look at the likelihood function, which in this illustrative example
# has a very simple shape
_ = plot_likelihood_function(jl)
"""
Explanation: Let's get a JointLikelihood object like the one we would have in a normal 3ML analysis. We use a custom function, prepared for this tutorial, which gives a JointLikelihood object having a very simple model with one free parameter ($\mu$), and with a likelihood having a very simple shape:
End of explanation
"""
model.test.spectrum.main.shape.mu = 1.0
# The minuit minimizer is the default, so no setup is necessary
# quiet = True means that no result will be printed
res = jl.fit(quiet=True)
# This plots the path that Minuit has traveled looking for the minimum
# Arrows connect the different points, starting from 1.0 and going
# to 40, the minimum
fig = plot_minimizer_path(jl)
"""
Explanation: Now let's set up the Minuit minimizer and minimize the -log(L), starting from $\mu = 1$:
End of explanation
"""
model.test.spectrum.main.shape.mu = 80.0
res = jl.fit(quiet=True)
fig = plot_minimizer_path(jl)
"""
Explanation: Now let's do the same, but starting from $\mu=80$:
End of explanation
"""
model.test.spectrum.main.shape.mu = 20.0
res = jl.fit(quiet=True)
fig = plot_minimizer_path(jl)
"""
Explanation: and from $\mu=20$:
End of explanation
"""
jl, model = get_joint_likelihood_object_complex_likelihood()
_ = plot_likelihood_function(jl)
"""
Explanation: It is clear that, depending on the starting point, minuit makes different steps trying to reach the minimum. In this last case, at one point Minuit overshoots the minimum jumping all the way from $\sim 30$ to $\sim 80$, then realizes the mistake and goes back.
In the case of a simple, convex likelihood like this one, Minuit finds easily the minimum independently of the starting point.
Global minimization
Now let us consider the case of a more complicated $-\log{L}$ function:
End of explanation
"""
model.test.spectrum.main.shape.mu = 1.0
res = jl.fit(quiet=True)
fig = plot_minimizer_path(jl)
"""
Explanation: This likelihood function has 3 minima: 2 are local and one (at $\mu = 60$) is the global minimum. Let's see how Minuit performs in this case. First we start from 1.0:
End of explanation
"""
model.test.spectrum.main.shape.mu = 70
res = jl.fit(quiet=True)
fig = plot_minimizer_path(jl)
"""
Explanation: Minuit has found the local minimum, not the global one. Now we start from 80:
End of explanation
"""
# Create an instance of the GRID minimizer
grid_minimizer = GlobalMinimization("grid")
# Create an instance of a local minimizer, which will be used by GRID
local_minimizer = LocalMinimization("minuit")
# Define a grid for mu as 10 steps between 1 and 80
my_grid = {model.test.spectrum.main.shape.mu: np.linspace(1, 80, 10)}
# Setup the global minimization
# NOTE: the "callbacks" option is useless in a normal 3ML analysis, it is
# here only to keep track of the evolution for the plot
grid_minimizer.setup(second_minimization=local_minimizer, grid = my_grid,
callbacks=[get_callback(jl)])
# Set the minimizer for the JointLikelihood object
jl.set_minimizer(grid_minimizer)
jl.fit()
fig = plot_minimizer_path(jl)
"""
Explanation: Now we found the global minimum. This is a simple example to show that the solution find by a local minimizers can depend on the starting point, and might not be the global minimum. In practice, one can rarely be guaranteed that the likelihood function has only one minimum. This is especially true in many dimensions and in cases of data with poor statistic.
To alleviate this problem 3ML offers some "global minimizers". While it is impossible to guarantee that a global minimum will be reached, these minimizers are much more robust towards this kind of problem, at the expense of a considerably longer runtime.
In 3ML each global minimizer must be associated with a local minimizer. The latter is used as final step to improve the solution found by the global minimizer and to compute the error matrix.
Grid minimizer
The idea behind this is very simple: the user defines a grid of values for the parameters, which are used as starting points for minimization performed by a local minimizers. At the end, the solution with the smallest value for $-\log{L}$ will be used as final solution.
For example, let's define a grid of 10 values for $\mu$. This means that 3ML will perform 10 local minimizations starting each time from a different point in the grid:
End of explanation
"""
# Reset the parameter to a value different from the best fit found
# by previous algorithms
jl, model = get_joint_likelihood_object_complex_likelihood()
model.test.spectrum.main.shape.mu = 2.5
# Create an instance of the PAGMO minimizer
pagmo_minimizer = GlobalMinimization("pagmo")
# Select one of the many algorithms provided by pagmo
# (see https://esa.github.io/pagmo2/docs/algorithm_list.html
# for a list).
# In this case we use the Artificial Bee Colony algorithm
# (see here for a description: https://link.springer.com/article/10.1007/s10898-007-9149-x)
import pygmo
my_algorithm = pygmo.algorithm(pygmo.bee_colony(gen=20))
# Create an instance of a local minimizer
local_minimizer = LocalMinimization("minuit")
# Setup the global minimization
pagmo_minimizer.setup(second_minimization = local_minimizer, algorithm=my_algorithm,
islands=10, population_size=10, evolution_cycles=1)
# Set the minimizer for the JointLikelihood object
jl.set_minimizer(pagmo_minimizer)
jl.fit()
# NOTE: given the inner working of pygmo, it is not possible
# to plot the evolution
"""
Explanation: The GRID minimizer has found the global minimum.
Of course the GRID minimizer can be used in multiple dimensions (simply define a grid for the other parameters as well). It is a simple brute force solution that works well in practice, especially when the likelihood function computation is not too time-consuming. When there are many parameters, you should choose carefully the parameters to use in the grid. For example, when looking for a spectral line in a spectrum, it makes sense to use the location of the line as parameter in the grid, but not its normalization.
PAGMO minimizer
The Pagmo minimizer is an open-source optimization suite provided by the European Space Agency:
https://esa.github.io/pagmo2/
It contains a lot of algorithms for optimization of different kinds:
https://esa.github.io/pagmo2/docs/algorithm_list.html
and it is very powerful. In order to be able to use it you need to install the python package pygmo (make sure to have version >= 2, as the old version 1.x has a different API and won't work with 3ML).
In Pagmo/pygmo, candidate solutions to the minimization are called "individuals". A population of individuals over which an algorithm acts to improve the solutions is called an "island". An ensemble of islands that can share solutions along a defined topology and thus learn from each other's progress is called an "archipelago". The evolution of the populations can be executed more than once ("evolution cycles").
After the pygmo section of the optimization has been completed, the secondary minimizer will be used to further improve on the solution (if possible) and to compute the covariance matrix.
End of explanation
"""
# Reset the parameter to a value different from the best fit found
# by previous algorithms
jl, model = get_joint_likelihood_object_complex_likelihood()
model.test.spectrum.main.shape.mu = 5.0
# Create an instance of the PAGMO minimizer
multinest_minimizer = GlobalMinimization("multinest")
# Create an instance of a local minimizer
local_minimizer = LocalMinimization("minuit")
# Setup the global minimization
multinest_minimizer.setup(second_minimization = local_minimizer, live_points=100)
# Set the minimizer for the JointLikelihood object
jl.set_minimizer(multinest_minimizer)
jl.fit()
# Plots the point traversed by Multinest
fig = plot_minimizer_path(jl, points=True)
"""
Explanation: Multinest minimizer
MultiNest is a Bayesian inference tool which calculates the evidence and explores the parameter space which may contain multiple posterior modes and pronounced (curving) degeneracies in moderately high dimensions. It is not strictly a minimizer. However, given its capacity to explore multiple modes of the likelihood function (i.e., multiple local minima), it can be used as a global minimizer.
The Multinest minimizer in 3ML forms a posterior probability using the likelihood multiplied by uniformative priors. The priors are automatically chosen (uniform if the allowed parameter range is less than 2 orders of magnitudes or negative values are allowed, log-uniform otherwise). Then, Multinest is run in multimodal mode (multimodal=True). At the end of the run, among all the values of the $-\log{L}$ traversed by Multinest, the smaller one is chosen as starting point for the local minimizer.
End of explanation
"""
|
HazyResearch/metal | tutorials/Visualization.ipynb | apache-2.0 | import sys
sys.path.append('../../metal')
import metal
%load_ext autoreload
%autoreload 2
%matplotlib inline
"""
Explanation: Visualization Tutorial
Inside metal/contrib/visualization are a number of simple helper methods for visualizing label matrices.
For example, you can generate heat maps of the label matrix or overlaps/conflicts.
End of explanation
"""
import pickle
with open("data/basics_tutorial.pkl", 'rb') as f:
X, Y, L, D = pickle.load(f)
from metal.utils import split_data
Ls = split_data(L, splits=[0.8, 0.1, 0.1], seed=123)
"""
Explanation: We reload the synthetic data from the basics tutorial.
End of explanation
"""
import os
from metal.contrib.visualization.analysis import view_label_matrix
# This if statement and others like it are for our continuous integration tests; you can ignore them.
if 'CI' not in os.environ:
view_label_matrix(Ls[0])
from metal.contrib.visualization.analysis import view_overlaps
if 'CI' not in os.environ:
view_overlaps(Ls[0])
from metal.contrib.visualization.analysis import view_conflicts
if 'CI' not in os.environ:
view_conflicts(Ls[0], normalize=False)
"""
Explanation: In this case, there isn't a lot to see, since these label matrices were created synthetically. However, these visualizations can often help you to discover unexpected phenomena in your label matrices, such as unexpectedly high or low conflict rates between certain labeling functions, or portions of your dataset with surprisingly low or high coverage.
End of explanation
"""
|
NKhan121/Portfolio | Model Evaluation/Model Evaluation .ipynb | mit | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
%matplotlib inline
df = pd.read_csv("car.csv")
df.head()
"""
Explanation: This Notebook will go through multiple models (KNN, Logistic Regression, Decision Trees, Support Vector Machines and Random Forest) to assess the best one.
End of explanation
"""
print df.buying.unique()
print df.maint.unique()
print df.doors.unique()
print df.persons.unique()
print df.lug_boot.unique()
print df.safety.unique()
print df.acceptability.unique()
"""
Explanation: Checking the unique values for each of the columns.
End of explanation
"""
map1 = {'low':1,
'med':2,
'high':3,
'vhigh':4}
map2 = {'small':1,
'med':2,
'big':3}
map3 = {'unacc':1,
'acc':2,
'good':3,
'vgood':4}
map4 = {'2': 2,
'4': 4,
'more': 5}
map5 = {'2': 2,
'3': 3,
'4': 4,
'5more': 5}
"""
Explanation: Using the information in the cell above, maps will be used to create a scale.
End of explanation
"""
features = [c for c in df.columns if c != 'acceptability']
#removing 'acceptability'
df1 = df.copy()
df1.buying= df.buying.map(map1)
df1.maint= df.maint.map(map1)
df1.doors = df.doors.map(map5)
df1.persons = df.persons.map(map4)
df1.lug_boot = df.lug_boot.map(map2)
df1.safety = df.safety.map(map1)
df1.acceptability = df.acceptability.map(map3)
X = df1[features]
y = df1['acceptability']
X.head(10)
#making sure it worked
"""
Explanation: Splitting up the needed features from my target which is acceptability.
End of explanation
"""
from sklearn.cross_validation import train_test_split, KFold
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, classification_report
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
def evaluate_model(model):
    """Fit *model* on the training split and score it on the held-out test split.

    Prints the confusion matrix and the classification report as side
    effects, and returns the test-set accuracy.

    Relies on the module-level splits X_train, y_train, X_test, y_test.
    """
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    accuracy = accuracy_score(y_test, predictions)
    print(confusion_matrix(y_test, predictions))
    print(classification_report(y_test, predictions))
    return accuracy
various_models = {}
"""
Explanation: Train test split and creating a function to evaluate the models being created next.
End of explanation
"""
from sklearn.neighbors import KNeighborsClassifier
a = evaluate_model(KNeighborsClassifier())
from sklearn.grid_search import GridSearchCV
params = {'n_neighbors': range(2,60)}
gsknn = GridSearchCV(KNeighborsClassifier(),
params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gsknn.fit(X, y)
gsknn.best_params_
gsknn.best_score_
evaluate_model(gsknn.best_estimator_)
various_models['knn'] = {'model': gsknn.best_estimator_,
'score': a}
"""
Explanation: KNN Classifier
End of explanation
"""
from sklearn.ensemble import BaggingClassifier
baggingknn = BaggingClassifier(KNeighborsClassifier())
evaluate_model(baggingknn)
bagging_params = {'n_estimators': [10, 20],
'max_samples': [0.7, 1.0],
'max_features': [0.7, 1.0],
'bootstrap_features': [True, False]}
gsbaggingknn = GridSearchCV(baggingknn,
bagging_params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gsbaggingknn.fit(X, y)
gsbaggingknn.best_params_
various_models['gsbaggingknn'] = {'model': gsbaggingknn.best_estimator_,
'score': evaluate_model(gsbaggingknn.best_estimator_)}
"""
Explanation: Bagging KNN Classifier. Resulted in a small decrease in the score (from .944 to .940).
End of explanation
"""
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
various_models['lr'] = {'model': lr,
'score': evaluate_model(lr)}
params = {'C': [0.001, 0.01, 0.1, 1.0, 10.0, 100.0],
'penalty': ['l1', 'l2']}
gslr = GridSearchCV(lr,
params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gslr.fit(X, y)
print gslr.best_params_
print gslr.best_score_
various_models['gslr'] = {'model': gslr.best_estimator_,
'score': evaluate_model(gslr.best_estimator_)}
gsbagginglr = GridSearchCV(BaggingClassifier(gslr.best_estimator_),
bagging_params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gsbagginglr.fit(X, y)
print gsbagginglr.best_params_
print gsbagginglr.best_score_
various_models['gsbagginglr'] = {'model': gsbagginglr.best_estimator_,
'score': evaluate_model(gsbagginglr.best_estimator_)}
"""
Explanation: Now moving onto Logistic Regression.
End of explanation
"""
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
various_models['dt'] = {'model': dt,
'score': evaluate_model(dt)}
params = {'criterion': ['gini', 'entropy'],
'splitter': ['best', 'random'],
'max_depth': [None, 5, 10],
'min_samples_split': [2, 5],
'min_samples_leaf': [1, 2, 3]}
gsdt = GridSearchCV(dt,
params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gsdt.fit(X, y)
print gsdt.best_params_
print gsdt.best_score_
various_models['gsdt'] = {'model': gsdt.best_estimator_,
'score': evaluate_model(gsdt.best_estimator_)}
gsbaggingdt = GridSearchCV(BaggingClassifier(gsdt.best_estimator_),
bagging_params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gsbaggingdt.fit(X, y)
print gsbaggingdt.best_params_
print gsbaggingdt.best_score_
various_models['gsbaggingdt'] = {'model': gsbaggingdt.best_estimator_,
'score': evaluate_model(gsbaggingdt.best_estimator_)}
"""
Explanation: Decision Trees are next.
End of explanation
"""
from sklearn.svm import SVC
svm = SVC()
various_models['svm'] = {'model': svm,
'score': evaluate_model(svm)}
params = {'C': [0.01, 0.1, 1.0, 10.0, 30.0, 100.0],
'gamma': ['auto', 0.1, 1.0, 10.0],
'kernel': ['linear', 'rbf']}
gssvm = GridSearchCV(svm,
params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gssvm.fit(X, y)
print gssvm.best_params_
print gssvm.best_score_
various_models['gssvm'] = {'model': gssvm.best_estimator_,
'score': evaluate_model(gssvm.best_estimator_)}
gsbaggingsvm = GridSearchCV(BaggingClassifier(gssvm.best_estimator_),
bagging_params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gsbaggingsvm.fit(X, y)
print gsbaggingsvm.best_params_
print gsbaggingsvm.best_score_
various_models['gsbaggingsvm'] = {'model': gsbaggingsvm.best_estimator_,
'score': evaluate_model(gsbaggingsvm.best_estimator_)}
"""
Explanation: On to Support Vector Machines.
End of explanation
"""
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
rf = RandomForestClassifier()
various_models['rf'] = {'model': rf,
'score': evaluate_model(rf)}
et = ExtraTreesClassifier()
various_models['et'] = {'model': et,
'score': evaluate_model(et)}
params = {'n_estimators':[3, 5, 10, 50],
'criterion': ['gini', 'entropy'],
'max_depth': [None, 3, 5],
'min_samples_split': [2,5],
'class_weight':[None, 'balanced']}
gsrf = GridSearchCV(RandomForestClassifier(n_jobs=-1),
params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gsrf.fit(X, y)
print gsrf.best_params_
print gsrf.best_score_
various_models['gsrf'] = {'model': gsrf.best_estimator_,
'score': evaluate_model(gsrf.best_estimator_)}
gset = GridSearchCV(ExtraTreesClassifier(n_jobs=-1),
params, n_jobs=-1,
cv=KFold(len(y), n_folds=3, shuffle=True))
gset.fit(X, y)
print gset.best_params_
print gset.best_score_
various_models['gset'] = {'model': gset.best_estimator_,
'score': evaluate_model(gset.best_estimator_)}
"""
Explanation: Random Forests and Extra Trees are up next.
End of explanation
"""
scores = pd.DataFrame([(k, v['score']) for k, v in various_models.iteritems()],
columns=['model', 'score']).set_index('model').sort_values('score', ascending=False)
plt.style.use('fivethirtyeight')
scores.plot(kind='bar')
plt.ylim(0.5, 1.05)
scores
"""
Explanation: Creating a dataframe to compare the models.
End of explanation
"""
#Repeating the tests on my various models
from sklearn.cross_validation import cross_val_score, StratifiedKFold
def retest(model):
    """Re-score *model* with stratified 3-fold cross-validation on (X, y).

    Returns a (mean, std) tuple of the fold accuracies.  Uses the
    module-level feature matrix X and target vector y.
    """
    fold_scores = cross_val_score(model, X, y,
                                  cv=StratifiedKFold(y, shuffle=True),
                                  n_jobs=-1)
    return fold_scores.mean(), fold_scores.std()
for k, v in various_models.iteritems():
cvres = retest(v['model'])
print k,
various_models[k]['cvres'] = cvres
cvscores = pd.DataFrame([(k, v['cvres'][0], v['cvres'][1] ) for k, v in various_models.iteritems()],
columns=['model', 'score', 'error']).set_index('model').sort_values('score', ascending=False)
fig, ax = plt.subplots()
rects1 = ax.bar(range(len(cvscores)), cvscores.score,
yerr=cvscores.error,
tick_label=cvscores.index)
plt.style.use('fivethirtyeight')
ax.set_ylabel('Scores')
plt.xticks(rotation=70)
plt.ylim(0.5, 1.05)
cvscores
"""
Explanation: Both gridsearch bagging SVM and gridsearch SVM were identical in the above modeling process.
End of explanation
"""
|
nproctor/phys202-2015-work | assignments/assignment10/ODEsEx03.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
"""
Explanation: Ordinary Differential Equations Exercise 3
Imports
End of explanation
"""
g = 9.81 # m/s^2
l = 0.5 # length of pendulum, in meters
tmax = 50. # seconds
t = np.linspace(0, tmax, int(100*tmax))
"""
Explanation: Damped, driven nonlinear pendulum
The equations of motion for a simple pendulum of mass $m$, length $l$ are:
$$
\frac{d^2\theta}{dt^2} = \frac{-g}{\ell}\sin\theta
$$
When a damping and periodic driving force are added the resulting system has much richer and interesting dynamics:
$$
\frac{d^2\theta}{dt^2} = \frac{-g}{\ell}\sin\theta - a \omega - b \sin(\omega_0 t)
$$
In this equation:
$a$ governs the strength of the damping.
$b$ governs the strength of the driving force.
$\omega_0$ is the angular frequency of the driving force.
When $a=0$ and $b=0$, the energy/mass is conserved:
$$E/m =g\ell(1-\cos(\theta)) + \frac{1}{2}\ell^2\omega^2$$
Basic setup
Here are the basic parameters we are going to use for this exercise:
End of explanation
"""
def derivs(y, t, a, b, omega0):
    """Compute the derivatives of the damped, driven pendulum.

    Parameters
    ----------
    y : ndarray
        The solution vector at the current time t[i]: [theta[i], omega[i]].
    t : float
        The current time t[i].
    a, b, omega0 : float
        Damping strength, driving strength and driving angular frequency
        of the differential equation.

    Returns
    -------
    dy : tuple
        The pair of derivatives at t[i]: (dtheta[i], domega[i]).
    """
    theta, omega = y
    dtheta = omega
    # g (gravity) and l (pendulum length) are module-level constants.
    domega = -g / l * np.sin(theta) - a * omega - b * np.sin(omega0 * t)
    return (dtheta, domega)
derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0)
assert np.allclose(derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0), [1.,-1.])
def energy(y):
    """Compute the energy per unit mass for the state array y.

    The state array y can have two forms:

    1. An ndim=1 array np.array([theta, omega]) at a single time.
    2. An ndim=2 array where each row is the [theta, omega] at a single
       time.

    Parameters
    ----------
    y : ndarray, list, tuple
        A solution vector, or an array of solution vectors (one per row).

    Returns
    -------
    E/m : ndarray
        The energy per mass, one entry per row of y (a length-1 array for
        ndim=1 input, matching the previous behaviour).
    """
    # Promote a single state vector to a one-row 2-d array so both input
    # forms go through the same vectorized expression (the old version
    # rebuilt theta/omega with per-element list comprehensions).
    y = np.atleast_2d(np.asarray(y, dtype=float))
    theta = y[:, 0]
    omega = y[:, 1]
    # E/m = potential + kinetic.  Use 0.5 rather than 1/2: under Python 2
    # integer division 1/2 == 0 and the kinetic term silently vanishes.
    # g and l are module-level constants.
    return g * l * (1.0 - np.cos(theta)) + 0.5 * l ** 2 * omega ** 2
assert np.allclose(energy(np.array([np.pi,0])),g)
assert np.allclose(energy(np.ones((10,2))), np.ones(10)*energy(np.array([1,1])))
"""
Explanation: Write a function derivs for usage with scipy.integrate.odeint that computes the derivatives for the damped, driven harmonic oscillator. The solution vector at each time will be $\vec{y}(t) = (\theta(t),\omega(t))$.
End of explanation
"""
a=0
b=0
omega0=0
ans = odeint(derivs, np.array([np.pi,0]), t, args=(a,b,omega0), atol=10**(-5), rtol=10**(-4))
plt.plot(t, energy(ans))
plt.title("Energy of Simple Pendulum at ( $\pi$, 0 )")
plt.xlabel("Time")
plt.ylabel("Energy")
ax = plt.gca()
ax.set_axis_bgcolor("#fcfcfc")
plt.plot(t, np.transpose(ans)[0], label="Omega")
plt.plot(t, np.transpose(ans)[1], label="Theta")
plt.title("Simple Pendulum")
plt.xlabel("Time")
ax = plt.gca()
ax.set_axis_bgcolor("#fcfcfc")
plt.legend(loc ='lower right')
assert True # leave this to grade the two plots and their tuning of atol, rtol.
"""
Explanation: Simple pendulum
Use the above functions to integrate the simple pendulum for the case where it starts at rest pointing vertically upwards. In this case, it should remain at rest with constant energy.
Integrate the equations of motion.
Plot $E/m$ versus time.
Plot $\theta(t)$ and $\omega(t)$ versus time.
Tune the atol and rtol arguments of odeint until $E/m$, $\theta(t)$ and $\omega(t)$ are constant.
Anytime you have a differential equation with a a conserved quantity, it is critical to make sure the numerical solutions conserve that quantity as well. This also gives you an opportunity to find other bugs in your code. The default error tolerances (atol and rtol) used by odeint are not sufficiently small for this problem. Start by trying atol=1e-3, rtol=1e-2 and then decrease each by an order of magnitude until your solutions are stable.
End of explanation
"""
def plot_pendulum(a=0.0, b=0.0, omega0=0.0):
    """Integrate the damped, driven pendulum and make a phase plot of the solution.

    Starts from theta(0) = -pi + 0.1, omega(0) = 0, integrates over the
    module-level time grid t, and plots theta (x-axis) against omega
    (y-axis).
    """
    sol = np.transpose(odeint(derivs, np.array([-np.pi + 0.1, 0]), t, args=(a, b, omega0)))
    # sol[0] is theta, sol[1] is omega.
    plt.plot(sol[0], sol[1])
    plt.title("Damped Driven Pendulum")
    # BUG FIX: the axis labels were swapped.  Theta is plotted on x (note
    # the pi-based xticks below) and omega on y (clipped to [-10, 10]).
    plt.xlabel("Theta")
    plt.ylim(-10, 10)
    plt.ylabel("Omega")
    plt.grid(False)
    ax = plt.gca()
    ax.set_axis_bgcolor("white")
    plt.xticks(np.linspace(-2*np.pi, 2*np.pi, 5),
               [r'$-2\pi$', r'$-\pi$', r'$0$', r'$\pi$', r'$2\pi$'])
"""
Explanation: Damped pendulum
Write a plot_pendulum function that integrates the damped, driven pendulum differential equation for a particular set of parameters $[a,b,\omega_0]$.
Use the initial conditions $\theta(0)=-\pi + 0.1$ and $\omega=0$.
Decrease your atol and rtol even futher and make sure your solutions have converged.
Make a parametric plot of $[\theta(t),\omega(t)]$ versus time.
Use the plot limits $\theta \in [-2 \pi,2 \pi]$ and $\omega \in [-10,10]$
Label your axes and customize your plot to make it beautiful and effective.
End of explanation
"""
plot_pendulum(0.5, 0.0, 0.0)
"""
Explanation: Here is an example of the output of your plot_pendulum function that should show a decaying spiral.
End of explanation
"""
interact(plot_pendulum, a=(0.0,10.0,0.1), b=(0.0,10.0, 0.1), omega0=(0,10.0,0.1));
"""
Explanation: Use interact to explore the plot_pendulum function with:
a: a float slider over the interval $[0.0,1.0]$ with steps of $0.1$.
b: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.
omega0: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.
End of explanation
"""
|
SRI-CSL/libpoly | examples/cad/SMT 2017 (Intro).ipynb | lgpl-3.0 | import polypy
"""
Explanation: Import the library.
End of explanation
"""
x = polypy.Variable('x')
[y, z] = [polypy.Variable(s) for s in ['y', 'z']]
"""
Explanation: Create variables $x$, $y$, $z$.
End of explanation
"""
order = polypy.variable_order
order.push(z)
order.push(y)
order
order.pop()
order.push(x)
order
"""
Explanation: Variable Ordering:
- global variable order as a list $order = [x_1, \ldots, x_n]$;
- $x \in order, y \notin order$: $x < y$;
- $x \in order, y \in order$: ordered according to the list
- $x \notin order, y \notin order$: order according to variable id
End of explanation
"""
f = (x**2 - 2*x + 1)*(x**2 - 2)
g = z*(x**2 - y**2)
"""
Explanation: Creating Polynomials:
- variables, constants
- arithmetic operations
$$f = (x^2 - 2x + 1)(x^2 - 2), g = z(x^2 - y^2)$$
End of explanation
"""
order.set([x, y, z])
print g
order.set([z, y])
print g
"""
Explanation: Variable order & Polynomials
End of explanation
"""
g.var()
g.degree()
g.coefficients()
g.derivative()
f.factor_square_free()
g.factor_square_free()
f
f.reductum()
"""
Explanation: Basic operations
End of explanation
"""
m = polypy.Assignment()
r = f.roots_isolate(m)
r
print r[0]
print r[1]
print r[2]
"""
Explanation: Root isolation:
- assignment maps variables to values
- we can isolate roots of univariate polynomials
End of explanation
"""
r[0].to_double()
r[0] > r[1]
r[0].get_value_between(r[1])
m.set_value(x, 0)
f.sgn(m)
m.set_value(x, r[0])
f.sgn(m)
"""
Explanation: Values can be integers, rationals, or algebraic numbers.
End of explanation
"""
|
eugeniopacceli/ComputerVision | quiz3/Quiz3 - Harris and SIFT.ipynb | mit | %matplotlib inline
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
import glob
import random as rnd
from scipy.ndimage import filters
from PIL import Image
from numpy import *
from pylab import *
from pandas import *
np.seterr(divide='ignore', invalid='ignore')
"""
Explanation: Quiz 3a - Implementation of the Harris Corner Detection
Eugenio Pacceli
Renato Oliveira
Brayan Acevedo
End of explanation
"""
#Compute the Algorithm Harris corner detecion for implementation in grayscale image
def compute_harris_points(img, sigma=3):
    """Compute the Harris corner response for each pixel of a grayscale image.

    Parameters
    ----------
    img : 2-d array-like
        Grayscale image.
    sigma : float, optional
        Scale of the Gaussian derivative / smoothing filters.

    Returns
    -------
    harris : ndarray
        Per-pixel corner response det(M) / trace(M); pixels where the
        response is not finite (flat regions where trace(M) == 0) are
        set to 0.
    """
    # First-order Gaussian derivatives in x and y.  (The old code
    # pre-allocated imx/imy with np.zeros(img.size) and immediately
    # overwrote them -- dead code, removed.)
    imx = filters.gaussian_filter(img, (sigma, sigma), (0, 1))
    imy = filters.gaussian_filter(img, (sigma, sigma), (1, 0))
    # Smoothed products of derivatives (entries of the structure matrix M).
    Sxx = filters.gaussian_filter(imx * imx, sigma)
    Sxy = filters.gaussian_filter(imx * imy, sigma)
    Syy = filters.gaussian_filter(imy * imy, sigma)
    # Determinant and trace of M.
    Mdet = Sxx * Syy - Sxy ** 2
    Mtr = Sxx + Syy
    # Suppress divide-by-zero / 0-over-0 warnings locally instead of
    # relying on a global np.seterr call.
    with np.errstate(divide='ignore', invalid='ignore'):
        harris = np.divide(Mdet, Mtr)
    # BUG FIX: also zero -inf responses; the old code only handled +inf
    # and nan, so a negative determinant over a zero trace leaked -inf.
    harris[~np.isfinite(harris)] = 0
    return harris
"""
Explanation: First off, Harris detector computes a squared matrix M comprised basically of derivatives of image pixels on both x and y axis.
Then it calculates the corner responde number by subtracting the k*trace(M) from the determinant of M
End of explanation
"""
def doHarrisNonMaxSupression(harrisim, min_dist=10, threshold=0.1):
    """Select corner coordinates from a Harris response image.

    Keeps responses above threshold * max(response), then greedily
    suppresses any candidate within min_dist pixels of an already
    selected one (and within min_dist of the image border).

    Also records min_dist / threshold in the module-level globals
    dist / t so that plot_harris_points can show them in its title.

    Returns a list of [row, col] coordinate arrays.
    """
    global t
    global dist
    dist = min_dist
    t = threshold

    # Binary mask of candidates above the relative threshold.
    cutoff = harrisim.max() * threshold
    candidate_mask = (harrisim > cutoff) * 1

    # Candidate coordinates and their response values, sorted ascending.
    coords = np.array(candidate_mask.nonzero()).T
    values = [harrisim[r, c] for (r, c) in coords]
    order = np.argsort(values)

    # Only locations at least min_dist away from the border are allowed.
    allowed = np.zeros(harrisim.shape)
    allowed[min_dist:-min_dist, min_dist:-min_dist] = 1

    # Greedy selection: take a candidate, then block its neighbourhood.
    selected = []
    for idx in order:
        r, c = coords[idx]
        if allowed[r, c] == 1:
            selected.append(coords[idx])
            allowed[r - min_dist:r + min_dist,
                    c - min_dist:c + min_dist] = 0
    return selected
def plot_harris_points(image, filtered_coords):
    """Display the image with the detected corners overlaid as red stars.

    The title reports the module-level globals dist / t recorded by the
    last call to doHarrisNonMaxSupression.
    """
    plt.figure(figsize=(20, 12))
    plt.gray()
    plt.imshow(image)
    plt.title('Harris corner detection, dist=%s and threshold=%s'%(dist,t))
    # Coordinates are (row, col); matplotlib expects (x, y) = (col, row).
    cols = [p[1] for p in filtered_coords]
    rows = [p[0] for p in filtered_coords]
    plt.plot(cols, rows, '*', color='r')
    plt.axis('off')
    plt.show()
"""
Explanation: Afterwards, Harris detector proceeds with non-maximal suppression assuming a certain threshold (0.1 in this case)
End of explanation
"""
im = Image.open('boat_images/img1.pgm')
plt.figure(figsize=(20,12))
gray()
plt.imshow(im, cmap = 'gray')
harrisim = compute_harris_points(im)
for i in range(1, 11,1):
xx=i* 0.01
j=10
filtered_coords = doHarrisNonMaxSupression(harrisim,j,xx)
plot_harris_points(im, filtered_coords)
"""
Explanation: Compute Harris for ten levels of threshold
Keypoints are shown as red stars
End of explanation
"""
def getGaussianKernel(sigma, kernelHeight=51, kernelWidth=51):
    """Build a normalised 2-D Gaussian kernel of odd dimensions.

    sigma: standard deviation of the Gaussian.
    Returns a float64 array of shape (kernelHeight, kernelWidth) summing to 1.
    """
    assert(kernelHeight % 2 == 1 and kernelWidth % 2 == 1)
    # Floor division keeps the offsets integral on Python 3 as well; the
    # original "/" produced floats there and made range() raise TypeError.
    yOffset = (kernelHeight - 1) // 2
    xOffset = (kernelWidth - 1) // 2
    kernel = np.ndarray((kernelHeight, kernelWidth), np.float64)
    for y in range(-yOffset, yOffset+1, 1):
        for x in range(-xOffset, xOffset+1, 1):
            kernel[y+yOffset][x+xOffset] = (1. / (2.*np.pi*sigma**2)) * np.exp(-(x**2 + y**2) / (2. * sigma**2))
    # Normalise so the kernel sums to one (preserves image brightness).
    kernel /= kernel.sum()
    return kernel
def calcGaussianPyramid(org_img):
    """Build a Gaussian scale-space pyramid of the input image.

    Returns an array of 7 octaves; each octave holds sigmaCount+1
    progressively blurred copies of the (downscaled) image as float64.
    """
    img = org_img.copy()
    bluredImg = img.copy()
    sigma = 1.6          # base blur level
    octaveCount = 7
    sigmaCount = 4
    gp = np.ndarray(shape=(octaveCount,), dtype=np.ndarray)
    for o in range(0, octaveCount):
        gp[o] = np.ndarray(shape=(sigmaCount+1, img.shape[0], img.shape[1]), dtype=np.float64)
        # Level 0 of each octave is the blurred image carried over from the
        # previous octave (the raw input for octave 0).
        gp[o][0] = bluredImg.copy()
        for s in range(1, sigmaCount + 1):
            #k = 2**(float(s)/float(sigmaCount))
            k = np.sqrt(2.0)**s  # per-level scale multiplier
            kernel = getGaussianKernel(k*sigma)
            bluredImg = cv2.filter2D(img, -1, kernel)
            gp[o][s] = bluredImg.copy()
        if (o < octaveCount-1):
            # Halve the resolution before starting the next octave.
            img = downscale(img)
            bluredImg = downscale(bluredImg)
    return gp
def calcDifference(img0, img1, threshold = 0):
    """Absolute per-pixel difference of two equally shaped images.

    Differences at or below *threshold* are zeroed; the result is always
    float64. Vectorised replacement for the original per-pixel Python loop;
    casting before subtracting also avoids unsigned-integer wraparound for
    uint8 inputs.
    """
    assert(img0.shape == img1.shape)
    diff = np.abs(img1.astype(np.float64) - img0.astype(np.float64))
    return np.where(diff > threshold, diff, 0.0)
def calcDoG(gp):
    """Difference-of-Gaussians pyramid built from a Gaussian pyramid *gp*."""
    DoG = np.ndarray(shape=gp.shape, dtype=np.ndarray)
    for octave in range(DoG.shape[0]):
        levels = gp[octave].shape[0] - 1
        DoG[octave] = np.ndarray(
            shape=(levels, gp[octave].shape[1], gp[octave].shape[2]),
            dtype=np.float64)
        # Each DoG level is the difference of two adjacent Gaussian levels.
        for level in range(levels):
            DoG[octave][level] = calcDifference(gp[octave][level],
                                                gp[octave][level + 1])
    return DoG
def getNeighbourhood(octave, s, y, x, radius=1):
    """Cube of values around (s, y, x) within *radius* along every axis."""
    lo_s, lo_y, lo_x = s - radius, y - radius, x - radius
    span = 2 * radius + 1
    return octave[lo_s:lo_s + span, lo_y:lo_y + span, lo_x:lo_x + span]
def calcExtrema(DoG, threshold=0.3, radius=1):
    """Find local extrema of the DoG pyramid as candidate keypoints.

    Each candidate is compared against its (2*radius+1)^3 neighbourhood
    across adjacent scales; keypoints are returned per octave and scale
    as (y, x, sigma) tuples mapped back to original-image coordinates.
    """
    keypoints = np.ndarray(shape=DoG.shape, dtype=np.ndarray)
    sigma = 1.6
    sigmaCount = DoG[0].shape[0]
    for o in range(DoG.shape[0]):
        # Only scales with a full neighbourhood above and below are usable.
        keypoints[o] = np.ndarray(shape=(DoG[o].shape[0]-(2*radius),), dtype=list)
        for s in range(radius, DoG[o].shape[0]-radius):
            keypoints[o][s-radius] = []
            k = 2**(float(s)/float(sigmaCount))  # scale multiplier for this level
            for y in range(radius, DoG[o].shape[1]-radius):
                for x in range(radius, DoG[o].shape[2]-radius):
                    value = DoG[o][s, y, x]
                    neighbourhood = getNeighbourhood(DoG[o], s, y, x, radius=radius).flatten()
                    neighbourhood.sort()
                    # Compare against the second-smallest / second-largest
                    # neighbour: the centre pixel itself is in the sorted array.
                    min2 = neighbourhood[1]
                    max2 = neighbourhood[-2]
                    if value < min2 or (value > threshold and value > max2):
                        # Map octave coordinates back to the original image;
                        # scale/2 centres the sample within its cell.
                        scale = 2**o
                        keypoints[o][s-radius].append((scale * y + scale/2, scale * x + scale/2, scale * k*sigma))
    return keypoints
def normalize(img):
    """Rescale *img* to the full 0-255 range and return it as uint8.

    Fixes the original guard, which compared the builtin ``max`` function
    (always truthy) instead of ``max_val``, so an all-zero image would
    have been divided by zero.
    """
    max_val = img.max()
    if max_val > 0:
        normImg = img / float(max_val)
        normImg *= 255.
        return normImg.astype(np.uint8)
    # Nothing to scale: hand back an untouched copy in the input dtype.
    return img.copy()
def scale(img, factor=2):
    """Upscale a 2-D image by an integer *factor* using nearest neighbour.

    np.repeat along both axes reproduces img[y // factor][x // factor]
    without the per-pixel Python loop, and is Python-3 safe (the original
    relied on Python 2 integer "/" division for the indices).
    """
    assert(len(img.shape) == 2)
    return np.repeat(np.repeat(img, factor, axis=0),
                     factor, axis=1).astype(np.float64)
def downscale(img):
    """Halve a 2-D image by keeping every second pixel along each axis.

    Strided slicing is equivalent to the original img[2*y][2*x] loop;
    floor division keeps the target shape integral on Python 3 (the
    original used Python 2 integer "/").
    """
    assert(len(img.shape) == 2)
    rows, cols = img.shape
    return img[0:2 * (rows // 2):2, 0:2 * (cols // 2):2].astype(np.float64)
def drawKeypoints(img, kp):
    """Return a BGR copy of *img* with each keypoint drawn as a circle.

    kp is an iterable of (y, x, scale) tuples; every circle gets a random
    colour so overlapping keypoints stay distinguishable.
    """
    if (len(img.shape) < 3 or img.shape[2] == 1):
        # Grayscale input: convert so coloured circles can be drawn on it.
        kpImg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    else:
        kpImg = img.copy()
    for y, x, scale in kp:
        r = rnd.randrange(0,255)
        g = rnd.randrange(0,255)
        b = rnd.randrange(0,255)
        # OpenCV expects (x, y) order and integer radius.
        cv2.circle(kpImg, (int(x), int(y)), int(scale), (r, g, b))
    return kpImg
def plotImage(title,image):
    """Display *image* in grayscale with *title* and no axes."""
    plt.figure(figsize=(20,12))
    gray()
    plt.imshow(image)
    plt.title(title)
    plt.axis('off')
    plt.show()
"""
Explanation: Quiz 3b - Implementation of the SIFT Feature Detector
End of explanation
"""
# Run the custom SIFT pipeline on every boat image: Gaussian pyramid,
# difference-of-Gaussians, extrema detection, then draw the keypoints.
images = glob.glob('boat_images/img_*.png')
print('Images Loaded!')
for filename in images:
    img = cv2.imread(filename, 0)  # read as grayscale
    gp = calcGaussianPyramid(img)
    DoG = calcDoG(gp)
    radius = 1
    keypoints = calcExtrema(DoG, radius=radius)
    kpImg = img.copy()
    # Accumulate keypoints from every octave and scale onto one image.
    for o in range(keypoints.shape[0]):
        for s in range(radius, DoG[o].shape[0]-radius):
            kp = keypoints[o][s-radius]
            kpImg = drawKeypoints(kpImg, kp)
    plotImage("Custom SIFT "+ filename, kpImg)
"""
Explanation: Execute the custom SIFT for every given image in the current path, then show keypoints as circles on the corresponding images.
SIFT algorithm begins detecting points that are invariant to scale changes. This is achieved by building a scale-space function named L(x,y,sigma) that consists of a convolution of a gaussian function and the image for every pixel. Following Lowe's paper (LOWE,2004) to efficiently detect keypoint locations in space scale we have proposed using a space scale extrema in the difference of Gaussian function convolved with the image.
In this implementation, the functions calcGaussianPyramid and getGaussianKernel compute the octaves and build a scale space for the image.
Afterwards, the difference of two consecutive Gaussian-blurred levels separated by a constant factor k is computed. This is implemented in the function calcDoG.
Next, a local extrema detection step is executed. As Lowe stated, each sample point is compared to its neighbors in the current image and to nine neighbors each in the scales above and below.
Only the largest or the smallest of the points is selected as a candidate keypoint extrema.
To improve matching and stability a detailed fit to nearby data for location, scale and ratio of principal curvatures is performed. The function calcExtrema executes this step. This eliminates points with low contrast or that poorly localized along an edge.
And finally, the selected keypoints are drawn on the image.
End of explanation
"""
# One-off PGM -> PNG conversions, kept commented out for reference.
#img = (Image.open('boat_images/img1.pgm').convert('L'))
#img.save('boat_images/img_1.png')
#img = (Image.open('boat_images/img2.pgm').convert('L'))
#img.save('boat_images/img_2.png')
#img = (Image.open('boat_images/img3.pgm').convert('L'))
#img.save('boat_images/img_3.png')
#img = (Image.open('boat_images/img4.pgm').convert('L'))
#img.save('boat_images/img_4.png')
#img = (Image.open('boat_images/img5.pgm').convert('L'))
#img.save('boat_images/img_5.png')
#img = (Image.open('boat_images/img6.pgm').convert('L'))
#img.save('boat_images/img_6.png')
# Reference run: OpenCV's built-in SIFT on the same images for comparison.
images = glob.glob('boat_images//img*.pgm')
for filename in images:
    img = cv2.imread(filename, 0)
    sift = cv2.SIFT()
    kp, desc = sift.detectAndCompute(img, None)
    imgfinal=cv2.drawKeypoints(img,kp,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    plotImage("SIFT Opencv "+filename, imgfinal)
## matching
# Brute-force kNN matching of SIFT descriptors between the first two images.
sift = cv2.SIFT()
imgA = cv2.imread('boat_images//img1.pgm', 0)
imgB = cv2.imread('boat_images//img2.pgm', 0)
kpA, desA = sift.detectAndCompute(imgA,None)
kpB, desB = sift.detectAndCompute(imgB,None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(desA,desB, k=2)
# NOTE(review): the commented drawing call references an undefined "good"
# list (a ratio-test filter was presumably intended) -- verify before enabling.
#img3 = cv2.drawMatchesKnn(imgA,kpA,imgB,kpB,good,flags=2)
#plt.imshow(img3)
#plt.show()
"""
Explanation: SIFT Feature Detector - Opencv Implementation
End of explanation
"""
|
hbutler/InverseCCP | 5 - Generate coupon probabilities - part 3.ipynb | mit | %matplotlib inline
import numpy as np
from numpy.random import beta as npbeta
from random import betavariate as pybeta
from scipy.stats import beta as scibeta
from matplotlib import pyplot as plt
from numpy import arange, vectorize
import timeit
# Time one million draws from four RNG back-ends to compare per-call
# overhead: plain uniform, numpy beta, pure-Python beta, and scipy beta.
start = timeit.default_timer()
for i in np.arange(1000000):
    t = np.random.rand()
et = timeit.default_timer() - start
print(et)
# numpy's beta generator
start = timeit.default_timer()
for i in np.arange(1000000):
    t = npbeta(1,1)
et = timeit.default_timer() - start
print(et)
# pure-Python random.betavariate
start = timeit.default_timer()
for i in np.arange(1000000):
    t = pybeta(1,1)
et = timeit.default_timer() - start
print(et)
# scipy.stats rvs (heavy per-call setup; see the conclusion below)
start = timeit.default_timer()
for i in np.arange(1000000):
    t = scibeta.rvs(1,1)
et = timeit.default_timer() - start
print(et)
"""
Explanation: The previous GenerateProbs and GenerateProbsPart2 focused on creating a CDF that can be indexed with a uniform random number to determine the coupon draw. But what if we went the other way? Each coupon occupies 1/N amount of space, but instead of a uniform random number, we draw a random variate from some distribution (probably beta) and that determines the likelihood that a coupon gets drawn. It's way easier to set up, but there may be a slowdown because we now draw random variates instead of uniform random numbers. Let's time a few options to generate beta distributed numbers.
End of explanation
"""
def single_run_looped(n, dist, alpha, beta):
    """
    One simulated run of the coupon-collector problem (CCP).

    n     = number of unique coupons
    dist  = label for the draw distribution (currently unused; kept so the
            vectorized call signature stays unchanged)
    alpha, beta = parameters of the Beta variate that selects a coupon

    Returns the number of draws needed to collect every coupon.
    """
    m = 0                               # draws so far
    cdf = (arange(n)+1.0)/n             # equal-width coupon boundaries
    seen = set()                        # O(1) membership (was an O(n) list scan)
    uniques = []                        # unique-count history, one entry per draw
    unique = 0
    while True:
        m += 1                          # increment our draw counter
        rv = npbeta(alpha, beta)        # randomness that decides which coupon to draw
        draw = (cdf > rv).sum()
        if draw not in seen:
            seen.add(draw)
            unique += 1
        uniques.append(unique)          # store the info
        if unique == n:                 # stop once all coupons are collected
            return m                    # number of draws; for testing
            #return uniques             # full history; the data we want to record
vectorized_single_run_looped = vectorize(single_run_looped)
# Sanity check: with Beta(1,1) (uniform) draws the simulation should
# reproduce the analytic coupon-collector expectation n * H(n).
start = timeit.default_timer()
#test our sim with known results on uniform probs
trials = 200000
n = 10
records = vectorized_single_run_looped([n]*trials, ['beta']*trials, [1.0]*trials, [1.0]*trials)
num_fails = np.where(records==0)
average = np.mean(records)
std_error = np.std(records)/np.sqrt(trials)
z_crit = 1.96  # 95% confidence interval
low_ci = average - z_crit*std_error
high_ci = average + z_crit*std_error
# Analytic expectation n * sum_{k=1..n} 1/k.
# NOTE(review): the comprehension reuses the name "n"; under Python 2 the
# loop variable leaks, so the trailing "*n" would use 9 instead of 10, and
# 1/(int+1) would floor to 0 -- verify this notebook runs on Python 3.
expectation = np.asarray([1/(n+1) for n in np.arange(n)]).sum()*n
et = timeit.default_timer()-start
print ("num_fails: ", len(num_fails[0]))
print("low_ci: ", low_ci, "point_est: ", average, "high_ci: ", high_ci)
print("expected value: ", expectation)
print("elapsed_time: ", et)
"""
Explanation: It looks like using random variates will make the sim run a little slower, but not by a crazy amount if we use numpy's variate generation. Stay away from pure python and scipy's version though, they'll kill the speed.
Now that we are drawing coupons using random variates instead of generating a cdf, we can simulate the ccp with a single function:
End of explanation
"""
|
jdossgollin/CWC_ANN | Week02/00-Sandbox.ipynb | mit | import numpy as np
import keras
from keras.datasets import mnist # load up the training data!
from keras.models import Sequential # our model
from keras.layers import Dense, Dropout, Flatten # layers we've seen
from keras.layers import Conv2D, MaxPooling2D # new layers
from keras import backend as K # see later
"""
Explanation: Sandbox
Use this space to play around with keras models and the MNIST training data!
End of explanation
"""
batch_size = 128   # samples per gradient update
num_classes = 10   # digits 0-9
epochs = 10        # full passes over the training set
"""
Explanation: Specify some parameters
End of explanation
"""
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Flatten the 28x28 images into 784-dimensional vectors and scale pixel
# values from [0, 255] down to [0, 1].
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
# One-hot encode the integer labels for categorical crossentropy.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
"""
Explanation: Load in training data.
If you want to do a convolutional layer you'll need to reshape data like in 03-MNIST_CNN.ipynb!
End of explanation
"""
# Minimal MLP: one 512-unit hidden layer with dropout, softmax output.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax')) # remember y has 10 categories!
"""
Explanation: Build Model
This code defines a very simple model -- add more layers!
End of explanation
"""
# Categorical crossentropy matches the one-hot labels prepared above.
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
"""
Explanation: Compile Model
Try playing around with different optimizers, loss functions, and more
End of explanation
"""
# Train, reporting accuracy on the held-out test set after each epoch.
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
"""
Explanation: Model Fit
Try playing with batch size and epochs
End of explanation
"""
model.summary()
"""
Explanation: Summarize Model
End of explanation
"""
|
aaronvincent/nuFATE | examples/notebook.ipynb | mit | # gamma = 2 #spectral index of incoming neutrino flux
gamma = '../resources/phiHGextrap.dat' #this is how you would specify an incoming flux from a file. Needs to be 200x1, on the same energy grid as below
flavor = 3 # 1 = nu_e, 2= nu_mu, 3= nu_tau. Negative for antineutrinos
Na = 6.0221415e23
def get_avg_attn(flavor,gamma,h5filename = "../resources/NuFATECrossSections.h5"):
    """Zenith-averaged Earth attenuation for one neutrino flavor.

    flavor: 1 = nu_e, 2 = nu_mu, 3 = nu_tau (negative for antineutrinos).
    gamma:  spectral index, or path to a flux file (see notebook header).
    Returns (energy_nodes, attenuation) averaged over the upgoing sky.
    """
    w,v,ci,energy_nodes,phi_0 = cas.get_eigs(flavor,gamma,h5filename)
    tlength = 230  # number of cos(zenith) samples over the upgoing hemisphere
    tvec = np.linspace(-1,0,tlength)
    phiv = energy_nodes*0.
    for ctheta in tvec:
        t = earth.get_t_earth(np.arccos(ctheta))*Na # g/ cm^2
        # Attenuated flux from the eigen-decomposition of the cascade equation.
        phisol = np.dot(v,(ci*np.exp(w*t)))/phi_0
        phiv = phiv + phisol
    phiv = phiv/tlength
    return energy_nodes, phiv
def get_avg_attn_secs(flavor,gamma,h5filename = "../resources/NuFATECrossSections.h5"):
    """Zenith-averaged Earth attenuation including secondary production.

    Same as get_avg_attn but uses the csx solver (with secondaries) and
    keeps only the first 200 energy nodes of the solution.
    """
    w,v,ci,energy_nodes,phi_0 = csx.get_eigs(flavor,gamma,h5filename)
    tlength = 230  # number of cos(zenith) samples
    tvec = np.linspace(-1,0,tlength)
    phiv = phi_0*0.
    for ctheta in tvec:
        t = earth.get_t_earth(np.arccos(ctheta))*Na # g/ cm^2
        phisol = np.dot(v,(ci*np.exp(w*t)))/phi_0
        phiv = phiv + phisol
    phiv = phiv/tlength
    phiv = phiv[0:200] #only keep non-tau bit
    return energy_nodes, phiv
"""
Explanation: Helper functions definitions
End of explanation
"""
# Single-flavor example: nu_tau-bar at 100 degrees zenith, comparing the
# naive exponential suppression with the full cascade-equation solution.
flavor = -3
zenith = np.radians(100.)
w,v,ci,energy_nodes,phi_0 = cas.get_eigs(flavor,gamma, "../resources/NuFATECrossSections.h5")
t = earth.get_t_earth(zenith)*Na  # column depth in g/cm^2
phisol = np.dot(v,(ci*np.exp(w*t)))/phi_0
plt.figure(figsize=(6,5))
# Total nu_tau-bar cross sections, used for the plain exp(-sigma*t) curve.
xsh5 = tables.open_file("../resources/NuFATECrossSections.h5","r")
sigma_array = xsh5.root.total_cross_sections.nutaubarxs[:]
xsh5.close()
plt.semilogx(energy_nodes,np.exp(-sigma_array*t),c='b',lw=2)
plt.semilogx(energy_nodes,phisol,c='r',lw=2)
plt.legend(['Exponential suppression', 'Full cascade solution'],
loc="upper right")
plt.xlabel(r"Neutrino Energy (GeV)")
plt.ylim(0.,1.)
plt.ylabel(r"Attenuation")
plt.grid()
from matplotlib import rc, rcParams
rc('text',usetex=True)
rc('font',**{'family':'serif','serif':['Computer Modern'], 'size' : 18})
cols = ['#29A2C6','#FF6D31','#FFCB18','#73B66B','#EF597B', '#333333']
font = {'family' : 'serif',
'weight' : 'bold',
'size' : 18}
"""
Explanation: Example Earth attenuation for single flavor and zenith
End of explanation
"""
energy_nodes, phim3 = get_avg_attn(-3,gamma)
energy_nodes, phim2 = get_avg_attn(-2,gamma)
energy_nodes, phim1 = get_avg_attn(-1,gamma)
energy_nodes, phi3 = get_avg_attn(3,gamma)
energy_nodes, phi2 = get_avg_attn(2,gamma)
energy_nodes, phi1 = get_avg_attn(1,gamma)
plt.figure(figsize=(6,5))
plt.semilogx(energy_nodes,phim1,linestyle='--',c='m',label="NuEBar")
plt.semilogx(energy_nodes,phim2,linestyle='--',c='g',label="NuMuBar")
plt.semilogx(energy_nodes,phim3,linestyle='--',c='b',label="NuTauBar")
plt.semilogx(energy_nodes,phi1,c='r',label="NuE")
plt.semilogx(energy_nodes,phi2,c='g',label="NuMu")
plt.semilogx(energy_nodes,phi3,c='b',label="NuTau")
plt.xlim(1e3,1e10)
plt.ylim(0.0,1.)
plt.legend(loc="lower left")
plt.xlabel("Neutrino Energy (GeV)")
plt.ylabel(r"Attenuation")
plt.minorticks_on()
plt.grid()
gamma = 2
flavor = -1
w,v,ci,energy_nodes,phi_0 = cas.get_eigs(flavor,gamma,"../resources/NuFATECrossSections.h5")
tlength = 100
Eindex = 80 #which energy index do you want to see?
d = 0 #at what depth do you want the detection point? default set to 0 km.
tvec = np.linspace(0,1,tlength)
phiv1 = tvec*0
phiv2 = tvec*0
phiv3 = tvec*0
phiv4 = tvec*0
phiv5 = tvec*0
for i in range(0,tlength):
ctheta = tvec[i]
t = earth.get_t_earth(np.arccos(-ctheta), d)*Na # g/ cm^2
phisol = np.dot(v,(ci*np.exp(w*t)))*energy_nodes**(gamma-2.)
phiv1[i] = phisol[0]
phiv2[i] = phisol[25]
phiv3[i] = phisol[50]
phiv4[i] = phisol[75]
phiv5[i] = phisol[99]
plt.figure(figsize=(6,5))
plt.plot(tvec,phiv1,lw=2)
plt.plot(tvec,phiv2,lw=2)
plt.plot(tvec,phiv3,lw=2)
plt.plot(tvec,phiv4,lw=2)
plt.plot(tvec,phiv5,lw=2)
plt.legend(['1 TeV','18 TeV','335 TeV','6.1 PeV','10 PeV'],loc="lower left")
plt.xlabel("-cos(zenith)")
plt.ylabel("Attenuation")
plt.title('NuEBar')
plt.minorticks_on()
plt.ylim(0.,1)
plt.grid()
"""
Explanation: Calculating and plotting zenith averaged Earth attenuation
End of explanation
"""
energy_nodes,phim2s = get_avg_attn_secs(-2,gamma)
energy_nodes,phim1s = get_avg_attn_secs(-1,gamma)
energy_nodes,phi1s = get_avg_attn_secs(1,gamma)
energy_nodes,phi2s = get_avg_attn_secs(2,gamma)
plt.figure(figsize=(6,5))
plt.semilogx(energy_nodes,phim1,linestyle='--',c='r')
plt.semilogx(energy_nodes,phim2,linestyle='--',c='g')
plt.semilogx(energy_nodes,phim3,linestyle='--',c='b')
plt.semilogx(energy_nodes,phi1,c='r')
plt.semilogx(energy_nodes,phi2,c='g')
plt.semilogx(energy_nodes,phi3,c='b')
plt.semilogx(energy_nodes,phi1s,c='r',linestyle='-.')
plt.semilogx(energy_nodes,phi2s,c='g',linestyle=':')
plt.semilogx(energy_nodes,phim1s,c='r',linestyle='-.')
plt.semilogx(energy_nodes,phim2s,c='g',linestyle=':')
plt.xlim(1e3,1e10)
plt.ylim(.0,1)
plt.ylabel("Attenuation")
plt.xlabel("Neutrino Energy (GeV)")
plt.minorticks_on()
plt.grid()
"""
Explanation: Calculating and plotting zenith averaged Earth attenuation with secondaries
End of explanation
"""
|
FavioVazquez/practical_introduction_to_functional_programming | PracticalFunctionalProgramming-Python.ipynb | mit | a = 0
# Article example: an UNfunctional function -- it mutates the module-level
# variable ``a`` (a side effect) instead of returning a value.
def increment1():
    global a
    a += 1
"""
Explanation: A practical introduction to functional programming
Many functional programming articles teach abstract functional techniques. That is, composition, pipelining, higher order functions. This one is different. It shows examples of imperative, unfunctional code that people write every day and translates these examples to a functional style.
The first section of the article takes short, data transforming loops and translates them into functional maps and reduces. The second section takes longer loops, breaks them up into units and makes each unit functional. The third section takes a loop that is a long series of successive data transformations and decomposes it into a functional pipeline.
The examples are in Python, because many people find Python easy to read. A number of the examples eschew pythonicity in order to demonstrate functional techniques common to many languages: map, reduce, pipeline.
A guide rope
When people talk about functional programming, they mention a dizzying number of “functional” characteristics. They mention immutable data, first class functions and tail call optimisation. These are language features that aid functional programming. They mention mapping, reducing, pipelining, recursing, currying and the use of higher order functions. These are programming techniques used to write functional code. They mention parallelization, lazy evaluation and determinism. These are advantageous properties of functional programs.
Ignore all that. Functional code is characterised by one thing: the absence of side effects. It doesn’t rely on data outside the current function, and it doesn’t change data that exists outside the current function. Every other “functional” thing can be derived from this property. Use it as a guide rope as you learn.
This is an unfunctional function:
End of explanation
"""
# Article example: the functional counterpart -- no side effects, the
# result depends only on the argument and is returned.
def increment2(a):
    return a + 1
"""
Explanation: This is a functional function:
End of explanation
"""
name_lengths = map(len, ["Mary", "Isla", "Sam"])
for i in name_lengths:
print(i)
"""
Explanation: Don’t iterate over lists. Use map and reduce.
Map
Map takes a function and a collection of items. It makes a new, empty collection, runs the function on each item in the original collection and inserts each return value into the new collection. It returns the new collection.
This is a simple map that takes a list of names and returns a list of the lengths of those names:
End of explanation
"""
squares = map(lambda x: x * x, [0, 1, 2, 3, 4])
for i in squares:
print(i)
"""
Explanation: This is a map that squares every number in the passed collection:
End of explanation
"""
import random
names = ['Mary', 'Isla', 'Sam']
code_names = ['Mr. Pink', 'Mr. Orange', 'Mr. Blonde']
for i in range(len(names)):
names[i] = random.choice(code_names)
print(names)
"""
Explanation: This map doesn’t take a named function. It takes an anonymous, inlined function defined with lambda. The parameters of the lambda are defined to the left of the colon. The function body is defined to the right of the colon. The result of running the function body is (implicitly) returned.
The unfunctional code below takes a list of real names and replaces them with randomly assigned code names.
End of explanation
"""
import random
names = ['Mary', 'Isla', 'Sam']
secret_names = map(lambda x: random.choice(['Mr. Pink',
'Mr. Orange',
'Mr. Blonde']),
names)
for i in secret_names:
print(i)
"""
Explanation: (As you can see, this algorithm can potentially assign the same secret code name to multiple secret agents. Hopefully, this won’t be a source of confusion during the secret mission.)
End of explanation
"""
names = ['Mary', 'Isla', 'Sam']
for i in range(len(names)):
names[i] = hash(names[i])
print(names)
"""
Explanation: Exercise 1. Try rewriting the code below as a map. It takes a list of real names and replaces them with code names produced using a more robust strategy.
End of explanation
"""
names = ['Mary', 'Isla', 'Sam']
secret_names = map(hash, names)
for i in secret_names:
print(i)
"""
Explanation: (Hopefully, the secret agents will have good memories and won’t forget each other’s secret code names during the secret mission.)
My solution:
End of explanation
"""
from functools import reduce #Since Python 3
mysum = reduce(lambda a, x: a + x, [0, 1, 2, 3, 4])
print(mysum)
"""
Explanation: Reduce
Reduce takes a function and a collection of items. It returns a value that is created by combining the items.
This is a simple reduce. It returns the sum of all the items in the collection.
End of explanation
"""
sentences = ['Mary read a story to Sam and Isla.',
'Isla cuddled Sam.',
'Sam chortled.']
sam_count1 = 0
for sentence in sentences:
sam_count1 += sentence.count('Sam')
print(sam_count1)
sentences = ['Mary read a story to Sam and Isla.',
'Isla cuddled Sam.',
'Sam chortled.']
sam_count2 = reduce(lambda a, x: a + x.count('Sam'),
sentences,
0)
print(sam_count2)
"""
Explanation: x is the current item being iterated over. a is the accumulator. It is the value returned by the execution of the lambda on the previous item. reduce() walks through the items. For each one, it runs the lambda on the current a and x and returns the result as the a of the next iteration.
What is a in the first iteration? There is no previous iteration result for it to pass along. reduce() uses the first item in the collection for a in the first iteration and starts iterating at the second item. That is, the first x is the second item.
This code counts how often the word 'Sam' appears in a list of strings:
End of explanation
"""
people = [{'name': 'Mary', 'height': 160},
{'name': 'Isla', 'height': 80},
{'name': 'Sam'}]
height_total = 0
height_count = 0
for person in people:
if 'height' in person:
height_total += person['height']
height_count += 1
if height_count > 0:
average_height = height_total / height_count
print(average_height)
"""
Explanation: How does this code come up with its initial a? The starting point for the number of incidences of 'Sam' cannot be 'Mary read a story to Sam and Isla.' The initial accumulator is specified with the third argument to reduce(). This allows the use of a value of a different type from the items in the collection.
Why are map and reduce better?
First, they are often one-liners.
Second, the important parts of the iteration - the collection, the operation and the return value - are always in the same places in every map and reduce.
Third, the code in a loop may affect variables defined before it or code that runs after it. By convention, maps and reduces are functional.
Fourth, map and reduce are elemental operations. Every time a person reads a for loop, they have to work through the logic line by line. There are few structural regularities they can use to create a scaffolding on which to hang their understanding of the code. In contrast, map and reduce are at once building blocks that can be combined into complex algorithms, and elements that the code reader can instantly understand and abstract in their mind. “Ah, this code is transforming each item in this collection. It’s throwing some of the transformations away. It’s combining the remainder into a single output.”
Fifth, map and reduce have many friends that provide useful, tweaked versions of their basic behaviour. For example: filter, all, any and find.
Exercise 2. Try rewriting the code below using map, reduce and filter. Filter takes a function and a collection. It returns a collection of every item for which the function returned True.
End of explanation
"""
people = [{'name': 'Mary', 'height': 160},
{'name': 'Isla', 'height': 80},
{'name': 'Sam'}]
heights = map(lambda x: x['height'],
filter(lambda x: 'height' in x, people))
heightsList = list(heights) # Neccesary in Python3
if len(heightsList) > 0:
from operator import add
average_height = reduce(add, heightsList) / len(heightsList)
print(average_height)
"""
Explanation: If this seems tricky, try not thinking about the operations on the data. Think of the states the data will go through, from the list of people dictionaries to the average height. Don’t try and bundle multiple transformations together. Put each on a separate line and assign the result to a descriptively-named variable. Once the code works, condense it.
End of explanation
"""
from random import random
time = 5
car_positions = [1, 1, 1]
while time:
# decrease time
time -= 1
print('')
for i in range(len(car_positions)):
# move car
if random() > 0.3:
car_positions[i] += 1
# draw car
print('-' * car_positions[i])
"""
Explanation: Write declaratively, not imperatively
The program below runs a race between three cars. At each time step, each car may move forwards or it may stall. At each time step, the program prints out the paths of the cars so far. After five time steps, the race is over.
-
--
--
--
--
---
---
--
---
----
---
----
----
----
-----
This is the program:
End of explanation
"""
from random import random
# Article example: the race refactored into named functions. Still
# imperative (globals ``time`` and ``car_positions``), as the text notes.
def move_cars():
    # Each car advances one step with 70% probability.
    for i, _ in enumerate(car_positions):
        if random() > 0.3:
            car_positions[i] += 1
def draw_car(car_position):
    # One dash per position unit.
    print('-' * car_position)
def run_step_of_race():
    global time
    time -= 1
    move_cars()
def draw():
    print('')
    for car_position in car_positions:
        draw_car(car_position)
time = 5            # remaining time steps
car_positions = [1, 1, 1]
while time:
    run_step_of_race()
    draw()
"""
Explanation: The code is written imperatively. A functional version would be declarative. It would describe what to do, rather than how to do it.
Use functions
A program can be made more declarative by bundling pieces of the code into functions.
End of explanation
"""
|
bblais/Classy | examples/Example Text Classification.ipynb | mit | count,feature_names=text.count_letters('data/languages/E3.txt')
print((count,feature_names))
count,feature_names=text.count_letters('data/languages/E3.txt')
print((count,feature_names))
# Letter frequencies for English, aligned with the counted features.
p=text.letter_freq('English',feature_names)
print(p)
# Log-likelihood of the observed letter counts under the English model.
print((sum(count*log10(p))))
C=text.LanguageFileClassifier()
result=C.loglikelihood('data/languages/E*.txt',verbose=True)
C.predict('data/languages/E*',verbose=True)
# Map predicted class indices back to language names.
[C.target_names[i] for i in C.predict('data/languages/E*')]
"""
Explanation: Language Classifier
End of explanation
"""
from classy import text
train=text.load_files('data/films/train',verbose=True)
test=text.load_files('data/films/test',verbose=True)
train,test=text.text_to_vectors('data/films/train','data/films/test',verbose=True)
train.vectors
v=array(train.vectors[0,:].todense()).ravel()
v.max()
v.shape
v=array(train.vectors[0,:].todense()).ravel()
plot(v,'.')
v=array(train.vectors[10,:].todense()).ravel()
plot(v,'.')
xlabel('feature number')
ylabel('frequency of feature')
train.vectors.shape
C=text.Multinomial()
C.fit(train.vectors,train.targets)
C.predict(test.vectors)
C.percent_correct(test.vectors,test.targets)
"""
Explanation: Text Classification from Folders
End of explanation
"""
from classy import *
train_files=text.load_files('data/films/train',verbose=True)
test_files=text.load_files('data/films/test',verbose=True)
train_data,test_data=text.text_to_vectors(train_files,test_files,verbose=True)
train_data.vectors
vectors_to_image(train_data.vectors,binary=True)
vectors_to_image(train_data.vectors,binary=False)
"""
Explanation: Footnote
End of explanation
"""
from classy import text
train_files=text.load_files('data/films/train',verbose=True)
test_files=text.load_files('data/films/test',verbose=True)
train_data,test_data=text.text_to_vectors(train_files,test_files,verbose=True)
train_data.vectors
"""
Explanation: Bigrams/Trigrams
End of explanation
"""
train_data,test_data=text.text_to_vectors(train_files,test_files,ngram_range=(1,2),verbose=True)
train_data.vectors
print((train_data.feature_names[:100]))
"""
Explanation: specify the ngram_range - the smallest ngram to use, and the largest. the default is (1,1), so only 1-grams are used. this example calculates the 1-gram and the 2-gram (bi-gram)
End of explanation
"""
|
FluVigilanciaBR/fludashboard | Notebooks/historical_estimated_values.ipynb | gpl-3.0 | # local
from fludashboard.libs.flu_data import prepare_keys_name
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
"""
Explanation: Table of Contents
Detailed panel
Weekly incidence curve with typical intensity and thresholds
Function for incidence plot:
State example
Regional example
Example with state where estimates are not available:
Obtaining the most probable activity level at selected week
Age distribution
Function for age distribution plot:
Incidence table information
Summary panel
Season level categorization:
Function to calculate seasonal level
Example applying to a given entry
Applying to the whole dataset
Seasonal age distribution
Incidence table information
Displaying data for user selected week
Detailed panel<a name="_detailed panel"></a>
Weekly incidence curve with typical intensity and thresholds<a name="_weekly incidence curve with typical intensity and thresholds"></a>
End of explanation
"""
df_hist = pd.read_csv('../data/historical_estimated_values.csv', encoding='utf-8')
df_inci = pd.read_csv('../data/current_estimated_values.csv', encoding='utf-8')
df_typi = pd.read_csv('../data/mem-typical.csv', encoding='utf-8')
df_thre = pd.read_csv('../data/mem-report.csv', encoding='utf-8')
prepare_keys_name(df_hist)
prepare_keys_name(df_inci)
prepare_keys_name(df_typi)
prepare_keys_name(df_thre)
level_dict = {
'L0': 'Baixa',
'L1': 'Epidêmica',
'L2': 'Alta',
'L3': 'Muito alta'
}
df_inci.columns
"""
Explanation: In this example, we show the current year's incidence up to a given week.<br>
Along with the current incidence, we present the following intensity thresholds:<br>
Low activity threshold: estimated epidemic threshold based on historical levels. Minimum: incidence equivalent to 5 cases.
High activity threshold: incidence considered high based on historical levels. Minimum: incidence equivalent to 10 cases.
Very high activity threshold: incidence considered very high based on historical levels. Minimum: incidence equivalent to 20 cases.
End of explanation
"""
df_inci.head(5)
df_typi.head(5)
df_thre.tail(5)
"""
Explanation: UF: locality code (includes UFs, Regions and Country)
Tipo: locality type (Estado, Regional or País)
mean: estimated mean incidence
50%: estimated median
2.5%: estimation lower 95% confidence interval
97.5%: estimation upper 95% confidence interval
L0: probability of being below epi. threshold (low level)
L1: probability of being above epi. threshold and below high activity (epidemic level)
L2: prob. of being above high activity and below very high (high level)
L3: prob. of being above very high activity threshold (very high level)
Situation:
stable: might suffer minor changes in the future. Reliable as is;
estimated: data estimated based on opportunity (i.e. notification delay) profile. Reliable within confidence interval;
unknown: might suffer significant changes in the coming weeks. This is the case for locations where estimation is not possible and data is still "fresh". Unreliable.
End of explanation
"""
k = ['epiyear', 'epiweek', 'base_epiyear', 'base_epiweek']
df_inci2017 = df_inci[
(df_inci.epiyear == 2017) &
# (df_inci.epiweek >= 15) &
(df_inci.dado == 'srag') &
(df_inci.escala == 'incidência') &
(df_inci.uf == 'BR')
].copy()
df_inci2017.sort_values(['epiyear', 'epiweek'], inplace=True)
df_inci_chart = df_inci2017.copy()
df_inci_chart.index = df_inci_chart.epiweek
k = ['epiyear', 'epiweek', 'base_epiyear', 'base_epiweek']
df_hist2017 = df_hist[
(df_hist.base_epiyear == 2017) &
(df_hist.base_epiweek == 23) &
(df_hist.dado == 'srag') &
(df_hist.escala == 'incidência') &
(df_hist.uf == 'BR')
].copy()
df_hist2017.sort_values(['epiyear', 'epiweek'], inplace=True)
df_hist_chart = df_hist2017.copy()
df_hist_chart.index = df_hist_chart.epiweek
# 50% estimated cases
df_inci_chart[['srag', '50%', '2.5%', '97.5%']].plot()
plt.title('Incidence')
plt.grid(True)
plt.show()
df_hist_chart[['srag', '50%', '2.5%', '97.5%']].plot()
plt.title('Historial')
plt.grid(True)
plt.show()
df_hist2017['estimated_cases'] = df_hist2017['50%']
df = pd.merge(
df_inci2017[['epiweek', 'srag', '2.5%', '97.5%']],
df_hist2017[['epiweek', 'estimated_cases']],
on='epiweek', how='outer'
)
df.set_index('epiweek', inplace=True)
df.plot()
plt.grid(True)
plt.title('Incidence X Historial')
plt.show()
"""
Explanation: Entries with dfthresholds['se típica do inicio do surto'] = NaN have activity too low for proper epidemic threshold definition
End of explanation
"""
df_hist[
(df_hist.base_epiyear == 2017) &
(df_hist.dado == 'srag') &
(df_hist.escala == 'incidência') &
(df_hist.uf == 'BR')
].base_epiweek.unique()
# First, last keep only stable weeksfor notification curve:
df_inci2017.loc[(df_inci2017.situation != 'stable'), 'srag'] = np.nan
# Adapt historical dataset:
df_hist.sort_values(['epiyear', 'epiweek'], inplace=True)
df_hist['estimated_cases'] = df_hist['50%']
# User selected week:
y = 2017
w = 23
def week_data(y, w):
    """Build the incidence-vs-historical-estimate frame for epi-week ``w`` of year ``y``.

    Notification counts are taken from the module-level ``df_inci2017`` (restricted
    to weeks up to ``w``), while the estimates are those produced at base week ``w``,
    pulled from the module-level ``df_hist``. Returns a DataFrame indexed by epiweek.
    """
    # Notifications known up to (and including) the selected week only.
    current = df_inci2017[(df_inci2017.epiweek <= w)]
    # Estimates exactly as they stood at base week w, for Brazil on the
    # SRAG incidence scale.
    historical = df_hist[
        (df_hist.base_epiyear == y) &
        (df_hist.base_epiweek == w) &
        (df_hist.dado == 'srag') &
        (df_hist.escala == 'incidência') &
        (df_hist.uf == 'BR')
    ].copy()
    # Outer merge keeps weeks present in either frame.
    merged = pd.merge(
        current[['epiweek', 'srag']],
        historical[['epiweek', 'estimated_cases', '2.5%', '97.5%']],
        on='epiweek', how='outer'
    )
    merged.set_index('epiweek', inplace=True)
    return merged
df = week_data(y, w)
df.plot()
plt.grid(True)
plt.show()
w = 28
df = week_data(y, w)
df.plot()
plt.grid(True)
plt.show()
w = 33
df = week_data(y, w)
df.plot()
plt.grid(True)
plt.show()
"""
Explanation: Displaying data for user selected week w<a name="_historical data display"></a>
For each week w selected by the user, the notification curve will always be that which is found on df_inci, while the estimates will be that stored in df_hist. Data df_inci only has the most recent estimates, which are based on the most recent week with data. The estimates obtained at each week is stored at df_hist.
So, first of all, we will slice the historical data to week w, and limit current data to week <= w.
If w=23, the historical dataset is already correctly sliced in df_hist2017, so we just have to limit the current for the proper plot:
End of explanation
"""
|
rvernagus/data-science-notebooks | scikit-learn/Recipes - Preparing Data.ipynb | mit | from sklearn import datasets
import numpy as np
datasets.*?
boston = datasets.load_boston()
print(boston.DESCR)
X, y = boston.data, boston.target
"""
Explanation: The dataset Module
End of explanation
"""
datasets.make_*?
X, y = datasets.make_regression(n_samples=1000, n_features=1,
n_informative=1, noise=15,
bias=1000, random_state=0)
import matplotlib.pyplot as plt
%matplotlib inline
plt.scatter(X, y);
X, y = datasets.make_blobs(n_samples=300, centers=4,
cluster_std=0.6, random_state=0)
plt.scatter(X[:, 0], X[:, 1], s=50);
"""
Explanation: Creating Sample Data
End of explanation
"""
from sklearn import preprocessing
X, y = boston.data, boston.target
X[:, :3].mean(axis=0)
X[:, :3].std(axis=0)
plt.plot(X[:, :3]);
"""
Explanation: Scaling Data
End of explanation
"""
X_2 = preprocessing.scale(X[:, :3])
X_2.mean(axis=0)
X_2.std(axis=0)
plt.plot(X_2);
"""
Explanation: preprocessing.scale
scale centers and scales the data using the following formula:
End of explanation
"""
scaler = preprocessing.StandardScaler()
scaler.fit(X[:, :3])
X_3 = scaler.transform(X[:, :3])
X_3.mean(axis=0)
X_3.std(axis=0)
plt.plot(X_3);
"""
Explanation: StandardScaler
Same as preprocessing.scale but persists scale settings across uses.
End of explanation
"""
scaler = preprocessing.MinMaxScaler()
scaler.fit(X[:, :3])
X_4 = scaler.transform(X[:, :3])
X_4.max(axis=0)
X_4.std(axis=0)
plt.plot(X_4);
scaler = preprocessing.MinMaxScaler(feature_range=(-4, 4))
scaler.fit(X[:, :3])
X_5 = scaler.transform(X[:, :3])
plt.plot(X_5);
"""
Explanation: MinMaxScaler
Scales data within a specified range.
End of explanation
"""
new_target = preprocessing.binarize(boston.target, threshold=boston.target.mean())
new_target[:, :5]
(boston.target[:5] > boston.target.mean()).astype(int)
"""
Explanation: Binarizing Data
preprocessing.binarize
End of explanation
"""
bin = preprocessing.Binarizer(boston.target.mean())
new_target = bin.fit_transform(boston.target)
new_target[:, :5]
"""
Explanation: Binarizer
End of explanation
"""
iris = datasets.load_iris()
X = iris.data
y = iris.target
d = np.column_stack((X, y))
encoder = preprocessing.OneHotEncoder()
encoder.fit_transform(d[:, -1:]).toarray()[:5]
"""
Explanation: Working with Categorical Variables
OneHotEncoder
End of explanation
"""
from sklearn.feature_extraction import DictVectorizer
dv = DictVectorizer()
dict = [{'species': iris.target_names[i]} for i in y]
dv.fit_transform(dict).toarray()[:5]
"""
Explanation: DictVectorizer
End of explanation
"""
import patsy
patsy.dmatrix('0 + C(species)', {'species': iris.target})
"""
Explanation: Patsy
End of explanation
"""
from sklearn.preprocessing import LabelBinarizer
binarizer = LabelBinarizer()
new_target = binarizer.fit_transform(y)
y.shape, new_target.shape
new_target[:5]
new_target[-5:]
binarizer.classes_
"""
Explanation: Binarizing Label Features
LabelBinarizer
End of explanation
"""
binarizer = LabelBinarizer(neg_label=-1000, pos_label=1000)
binarizer.fit_transform(y)[:5]
"""
Explanation: LabelBinarizer and labels
End of explanation
"""
iris = datasets.load_iris()
iris_X = iris.data
masking_array = np.random.binomial(1, .25, iris_X.shape).astype(bool)
iris_X[masking_array] = np.nan
masking_array[:5]
iris_X[:5]
"""
Explanation: Inputing Missing Values through Various Strategies
End of explanation
"""
impute = preprocessing.Imputer()
iris_X_prime = impute.fit_transform(iris_X)
iris_X_prime[:5]
impute = preprocessing.Imputer(strategy='median')
iris_X_prime = impute.fit_transform(iris_X)
iris_X_prime[:5]
iris_X[np.isnan(iris_X)] = -1
iris_X[:5]
impute = preprocessing.Imputer(missing_values=-1)
iris_X_prime = impute.fit_transform(iris_X)
iris_X_prime[:5]
"""
Explanation: By default, Imputer fills in missing values with the mean.
End of explanation
"""
mat = datasets.make_spd_matrix(10)
masking_array = np.random.binomial(1, .1, mat.shape).astype(bool)
mat[masking_array] = np.nan
mat[:4, :4]
"""
Explanation: Using Pipelines for Multiple Preprocessing Steps
End of explanation
"""
from sklearn import pipeline
pipe = pipeline.Pipeline([('impute', impute), ('scaler', scaler)])
pipe
new_mat = pipe.fit_transform(mat)
new_mat[:4, :4]
"""
Explanation: How to create a pipeline:
End of explanation
"""
iris = datasets.load_iris()
iris_X = iris.data
from sklearn import decomposition
pca = decomposition.PCA()
pca
iris_pca = pca.fit_transform(iris_X)
iris_pca[:5]
"""
Explanation: To be included in Pipeline, objects should have fit, transform, and fit_transform methods.
Reducing Dimensionality with PCA (Principal Component Analysis)
End of explanation
"""
pca.explained_variance_ratio_
"""
Explanation: PCA transforms the covariances of the data into column vectors that show certain percentages of the variance:
End of explanation
"""
pca = decomposition.PCA(n_components=2)
iris_X_prime = pca.fit_transform(iris_X)
iris_X.shape, iris_X_prime.shape
plt.scatter(iris_X_prime[:50, 0], iris_X_prime[:50, 1]);
plt.scatter(iris_X_prime[50:100, 0], iris_X_prime[50:100, 1]);
plt.scatter(iris_X_prime[100:150, 0], iris_X_prime[100:150, 1]);
pca.explained_variance_ratio_.sum()
"""
Explanation: High-dimensionality is problematic in data analysis. Consider representing data in fewer dimensions when models overfit on high-dimensional datasets.
End of explanation
"""
# Ask PCA to keep enough components to explain at least 98% of the variance
# (a float n_components in (0, 1) is interpreted as a variance fraction).
pca = decomposition.PCA(n_components=.98)
# Use fit_transform (not plain fit) so iris_X_prime actually holds the
# projected data; the original assigned the estimator returned by fit()
# to a name that suggests transformed data.
iris_X_prime = pca.fit_transform(iris_X)
pca.explained_variance_ratio_.sum()
"""
Explanation: You can create a PCA with the desired variance to be explained:
End of explanation
"""
from sklearn.decomposition import FactorAnalysis
fa = FactorAnalysis(n_components=2)
iris_two_dim = fa.fit_transform(iris.data)
iris_two_dim[:5]
"""
Explanation: Using Factor Analysis for Decomposition
Factor analysis differs from PCA in that it makes assumptions about which implicit features underlie the explicit features of a dataset.
End of explanation
"""
A1_mean = [1, 1]
A1_cov = [[2, .99], [1, 1]]
A1 = np.random.multivariate_normal(A1_mean, A1_cov, 50)
A2_mean = [5, 5]
A2_cov = [[2, .99], [1, 1]]
A2 = np.random.multivariate_normal(A2_mean, A2_cov, 50)
A = np.vstack((A1, A2))
B_mean = [5, 0]
B_cov = [[.5, -1], [-.9, .5]]
B = np.random.multivariate_normal(B_mean, B_cov, 100)
plt.scatter(A[:, 0], A[:, 1]);
plt.scatter(B[:, 0], B[:, 1]);
kpca = decomposition.KernelPCA(kernel='cosine', n_components=1)
AB = np.vstack((A, B))
AB_transformed = kpca.fit_transform(AB)
plt.scatter(AB_transformed[:50], np.zeros(AB_transformed[:50].shape), alpha=0.5);
plt.scatter(AB_transformed[50:], np.zeros(AB_transformed[50:].shape)+0.001, alpha=0.5);
pca = decomposition.PCA(n_components=2)
AB_prime = pca.fit_transform(AB)
plt.scatter(AB_prime[:, 0], np.zeros(AB_prime[:, 0].shape), alpha=0.5);
plt.scatter(AB_prime[:, 1], np.zeros(AB_prime[:, 1].shape)+0.001, alpha=0.5);
"""
Explanation: Kernel PCA for Nonlinear Dimensionality Reduction
When data is not lineraly seperable, Kernel PCA can help. Here, data is projected by the kernel function and then PCA is performed.
End of explanation
"""
# Reload the iris dataset fresh for the Truncated SVD example.
iris = datasets.load_iris()
iris_data = iris.data
iris_target = iris.target  # fixed typo: was `itis_target` (never referenced again)
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(2)
iris_transformed = svd.fit_transform(iris_data)
iris_data[:5]
iris_transformed[:5]
plt.scatter(iris_data[:50, 0], iris_data[:50, 2]);
plt.scatter(iris_data[50:100, 0], iris_data[50:100, 2]);
plt.scatter(iris_data[100:150, 0], iris_data[100:150, 2]);
plt.scatter(iris_transformed[:50, 0], -iris_transformed[:50, 1]);
plt.scatter(iris_transformed[50:100, 0], -iris_transformed[50:100, 1]);
plt.scatter(iris_transformed[100:150, 0], -iris_transformed[100:150, 1]);
"""
Explanation: Using Truncated SVD to Reduce Dimensionality
Singular Value Decomposition (SVD) factors a matrix M into three matrices: U, Σ, and V. Whereas PCA factors the covariance matrix, SVD factors the data matrix itself.
Given a dataset with n columns, SVD yields up to n components. Truncated SVD keeps only the number of components you specify, producing a reduced-dimension dataset.
End of explanation
"""
from scipy.linalg import svd
D = np.array([[1, 2], [1, 3], [1, 4]])
D
U, S, V = svd(D, full_matrices=False)
U.shape, S.shape, V.shape
np.dot(U.dot(np.diag(S)), V)
new_S = S[0]
new_U = U[:, 0]
new_U.dot(new_S)
"""
Explanation: How It Works
End of explanation
"""
from sklearn.decomposition import DictionaryLearning
dl = DictionaryLearning(3) # 3 species of iris
transformed = dl.fit_transform(iris_data[::2])
transformed[:5]
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(transformed[0:25, 0], transformed[0:25, 1], transformed[0:25, 2]);
ax.scatter(transformed[25:50, 0], transformed[25:50, 1], transformed[25:50, 2]);
ax.scatter(transformed[50:75, 0], transformed[50:75, 1], transformed[50:75, 2]);
transformed = dl.transform(iris_data[1::2])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(transformed[0:25, 0], transformed[0:25, 1], transformed[0:25, 2]);
ax.scatter(transformed[25:50, 0], transformed[25:50, 1], transformed[25:50, 2]);
ax.scatter(transformed[50:75, 0], transformed[50:75, 1], transformed[50:75, 2]);
"""
Explanation: Decomposition to Classify with DictionaryLearning
DictionaryLearning assumes that the features are the basis for the resulting datasets.
End of explanation
"""
iris = datasets.load_iris()
iris_data = iris.data
mask = np.random.binomial(1, .25, iris_data.shape).astype(bool)
iris_data[mask] = np.nan
iris_data[:5]
pca = decomposition.PCA()
imputer = preprocessing.Imputer()
pipe = pipeline.Pipeline([('imputer', imputer), ('pca', pca)])
iris_data_transformed = pipe.fit_transform(iris_data)
iris_data_transformed[:5]
pipe2 = pipeline.make_pipeline(imputer, pca)
pipe2.steps
iris_data_transformed2 = pipe2.fit_transform(iris_data)
iris_data_transformed2[:5]
"""
Explanation: Putting it All Together with Pipelines
End of explanation
"""
boston = datasets.load_boston()
boston_X = boston.data
boston_y = boston.target
train_set = np.random.choice([True, False], len(boston_y), p=[.75, .25])
from sklearn.gaussian_process import GaussianProcess
gp = GaussianProcess()
gp.fit(boston_X[train_set], boston_y[train_set])
test_preds = gp.predict(boston_X[~train_set])
f, ax = plt.subplots(figsize=(10, 7), nrows=3)
f.tight_layout()
ax[0].plot(range(len(test_preds)), test_preds, label='Predicted Values');
ax[0].plot(range(len(test_preds)), boston_y[~train_set], label='Actual Values');
ax[0].set_title('Predicted vs Actual');
ax[0].legend(loc='best');
ax[1].plot(range(len(test_preds)), test_preds - boston_y[~train_set]);
ax[1].set_title('Plotted Residuals');
ax[2].hist(test_preds - boston_y[~train_set]);
ax[2].set_title('Histogram of Residuals');
"""
Explanation: Using Gaussian Processes for Regression
End of explanation
"""
gp = GaussianProcess(regr='linear', theta0=5e-1)
gp.fit(boston_X[train_set], boston_y[train_set]);
linear_preds = gp.predict(boston_X[~train_set])
f, ax = plt.subplots(figsize=(7, 5))
f.tight_layout()
ax.hist(test_preds - boston_y[~train_set], label='Residuals Original', color='b', alpha=.5);
ax.hist(linear_preds - boston_y[~train_set], label='Residuals Linear', color='r', alpha=.5);
ax.set_title('Residuals');
ax.legend(loc='best');
f, ax = plt.subplots(figsize=(10, 7), nrows=3)
f.tight_layout()
ax[0].plot(range(len(linear_preds)), linear_preds, label='Predicted Linear Values');
ax[0].plot(range(len(linear_preds)), boston_y[~train_set], label='Actual Values');
ax[0].set_title('Predicted Linear vs Actual');
ax[0].legend(loc='best');
ax[1].plot(range(len(linear_preds)), linear_preds - boston_y[~train_set]);
ax[1].set_title('Plotted Residuals');
ax[2].hist(linear_preds - boston_y[~train_set]);
ax[2].set_title('Histogram of Residuals');
np.power(test_preds - boston_y[~train_set], 2).mean(), np.power(linear_preds - boston_y[~train_set], 2).mean()
"""
Explanation: You can tune regr and theta0 to get different predictions:
End of explanation
"""
test_preds, MSE = gp.predict(boston_X[~train_set], eval_MSE=True)
MSE[:5]
f, ax = plt.subplots(figsize=(7, 5))
n = 20
rng = range(n)
ax.scatter(rng, test_preds[:n]);
ax.errorbar(rng, test_preds[:n], yerr=1.96*MSE[:n]);
ax.set_title('Predictions with Error Bars');
ax.set_xlim((-1, 21));
"""
Explanation: Measuring Uncertainty
End of explanation
"""
from sklearn.gaussian_process import regression_models
X, y = datasets.make_regression(1000, 1, 1)
regression_models.constant(X)[:5]
regression_models.linear(X)[:5]
regression_models.quadratic(X)[:5]
"""
Explanation: Defining the Gaussian Process Object Directly
End of explanation
"""
X, y = datasets.make_regression((int(1e6)))
"""
Explanation: Using Stochastic Gradient Descent for Regression
End of explanation
"""
X.nbytes / 1e6
from sklearn import linear_model
sgd = linear_model.SGDRegressor()
train = np.random.choice([True, False], size=len(y), p=[.75, .25])
sgd.fit(X[train], y[train])
"""
Explanation: Size of the regression (MB):
End of explanation
"""
|
junhwanjang/DataSchool | Lecture/23. PCA/2) 고유분해와 특이값 분해.ipynb | mit | w, V = np.linalg.eig(np.array([[1, -2], [2, -3]]))
w
V
"""
Explanation: 고유분해와 특이값 분해
정방 행렬 $A$에 대해 다음 식을 만족하는 단위 벡터 $v$, 스칼라 $\lambda$을 여러 개 찾을 수 있다.
$$ Av = \lambda v $$
$ A \in \mathbf{R}^{M \times M} $
$ \lambda \in \mathbf{R} $
$ v \in \mathbf{R}^{M} $
이러한 실수 $\lambda$를 고유값(eigenvalue), 단위 벡터 $v$ 를 고유벡터(eigenvector) 라고 하며 고유값과 고유벡터를 찾는 작업을 고유분해(eigen-decomposition)라고 한다.
$ A \in \mathbf{R}^{M \times M} $ 에 대해 최대 $M$개의 고유값-고유벡터 쌍이 존재할 수 있다.
예를 들어 다음 행렬 $A$
$$
A=
\begin{bmatrix}
1 & -2 \
2 & -3
\end{bmatrix}
$$
에 대해 다음 단위 벡터와 스칼라 값은 고유벡터-고유값이 된다.
$$\lambda = -1$$
$$
v=
\begin{bmatrix}
\dfrac{1}{\sqrt{2}} \
\dfrac{1}{\sqrt{2}}
\end{bmatrix}
$$
복수 개의 고유 벡터가 존재하는 경우에는 다음과 같이 고유벡터 행렬 $V$와 고유값 행렬 $\Lambda$로 표기할 수 있다.
$$
A \left[ v_1 \cdots v_M \right] =
\left[ \lambda_1 v_1 \cdots \lambda_M v_M \right] =
\left[ v_1 \cdots v_M \right]
\begin{bmatrix}
\lambda_{1} & 0 & \cdots & 0 \
0 & \lambda_{2} & \cdots & 0 \
\vdots & \vdots & \ddots & \vdots \
0 & 0 & \cdots & \lambda_{M} \
\end{bmatrix}
$$
$$ AV = V\Lambda $$
여기에서
$$
V = \left[ v_1 \cdots v_M \right]
$$
$$
\Lambda =
\begin{bmatrix}
\lambda_{1} & 0 & \cdots & 0 \
0 & \lambda_{2} & \cdots & 0 \
\vdots & \vdots & \ddots & \vdots \
0 & 0 & \cdots & \lambda_{M} \
\end{bmatrix}
$$
numpy linalg 서브패키지에서는 고유값과 고유벡터를 구할 수 있는 eig 명령을 제공한다.
End of explanation
"""
mu = [2, 3]
cov = [[2, 3],[3, 7]]
rv = sp.stats.multivariate_normal(mu, cov)
xx = np.linspace(0, 4, 120)
yy = np.linspace(1, 5, 150)
XX, YY = np.meshgrid(xx, yy)
plt.grid(False)
plt.contourf(XX, YY, rv.pdf(np.dstack([XX, YY])))
x1 = np.array([0, 2])
x1_mu = x1 - mu
x2 = np.array([3, 4])
x2_mu = x2 - mu
plt.plot(x1_mu[0] + mu[0], x1_mu[1] + mu[1], 'bo', ms=20)
plt.plot(x2_mu[0] + mu[0], x2_mu[1] + mu[1], 'ro', ms=20)
plt.axis("equal")
plt.show()
w, V = np.linalg.eig(cov)
w
V
rv = sp.stats.multivariate_normal(mu, w)
xx = np.linspace(0, 4, 120)
yy = np.linspace(1, 5, 150)
XX, YY = np.meshgrid(xx, yy)
plt.grid(False)
plt.contourf(XX, YY, rv.pdf(np.dstack([XX, YY])))
x1 = np.array([0, 2])
x1_mu = x1 - mu
x2 = np.array([3, 4])
x2_mu = x2 - mu
x1t_mu = V.T.dot(x1_mu) # 좌표 변환
x2t_mu = V.T.dot(x2_mu) # 좌표 변환
plt.plot(x1t_mu[0] + mu[0], x1t_mu[1] + mu[1], 'bo', ms=20)
plt.plot(x2t_mu[0] + mu[0], x2t_mu[1] + mu[1], 'ro', ms=20)
plt.axis("equal")
plt.show()
"""
Explanation: 대칭 행렬의 고유 분해
행렬 $A$가 대칭(symmetric) 행렬이면 고유값 벡터 행렬 $V$는 다음과 같이 전치 행렬이 역행렬과 같아진다.
$$ V^T V = V V^T = I$$
이 때는 고유 분해가 다음과 같이 표시된다.
$$ A = V\Lambda V^T = \sum_{i=1}^{M} {\lambda_i} v_i v_i^T$$
$$ A^{-1} = V \Lambda^{-1} V^T = \sum_{i=1}^{M} \dfrac{1}{\lambda_i} v_i v_i^T$$
확률 변수의 좌표 변환
확률 변수의 공분산 행렬 $\Sigma$ 은 대칭 행렬이므로 위의 관계식이 성립한다.
따라서 다변수 가우시안 정규 분포의 확률 밀도 함수는 다음과 같이 표시할 수 있다.
$$
\begin{eqnarray}
\mathcal{N}(x \mid \mu, \Sigma)
&=& \dfrac{1}{(2\pi)^{D/2} |\Sigma|^{1/2}} \exp \left( -\dfrac{1}{2} (x-\mu)^T \Sigma^{-1} (x-\mu) \right) \
&=& \dfrac{1}{(2\pi)^{D/2} |\Sigma|^{1/2}} \exp \left( -\dfrac{1}{2} (x-\mu)^T V \Lambda^{-1} V^T (x-\mu) \right) \
&=& \dfrac{1}{(2\pi)^{D/2} |\Sigma|^{1/2}} \exp \left( -\dfrac{1}{2} (V^T(x-\mu))^T \Lambda^{-1} (V^T (x-\mu)) \right) \
\end{eqnarray}
$$
즉 변환 행렬 $V^T$로 좌표 변환하면 서로 독립인 성분들로 나누어진다.
End of explanation
"""
from pprint import pprint

# Worked SVD example: factor the 4x5 matrix M as U . S . V^T and verify
# that U and V are orthogonal and that the product reconstructs M.
M = np.array([[1,0,0,0,0],[0,0,2,0,3],[0,0,0,0,0],[0,2,0,0,0]])
print("\nM:"); pprint(M)

U, S0, V0 = np.linalg.svd(M, full_matrices=True)
print("\nU:"); pprint(U)

# np.linalg.svd returns the singular values as a 1-D vector; rebuild the
# full (4, 5) Sigma by diagonalising and padding with a zero column.
S = np.hstack([np.diag(S0), np.zeros(M.shape[0])[:, np.newaxis]])
print("\nS:"); pprint(S)

# Bug fix: V must be assigned *before* it is printed -- the original code
# printed V on the line above `V = V0.T`, showing a stale V left over from
# the eigendecomposition cell earlier in the notebook.
V = V0.T
print("\nV:"); pprint(V)

print("\nU.dot(U.T):"); pprint(U.dot(U.T))
print("\nV.dot(V.T):"); pprint(V.dot(V.T))
print("\nU.dot(S).dot(V.T):"); pprint(U.dot(S).dot(V.T))
"""
Explanation: 특이값 분해
정방 행렬이 아닌 행렬 $M$에 대해서도 고유 분해와 유사한 분해가 가능하다. 이를 특이값 분해(singular value decomposition)이라고 한다.
$M \in \mathbf{R}^{m \times n}$
$$M = U \Sigma V^T$$
여기에서
* $U \in \mathbf{R}^{m \times m}$
* $\Sigma \in \mathbf{R}^{m \times n}$
* $V \in \mathbf{R}^{n \times n}$
이고 행렬 $U$와 $V$는 다음 관계를 만족한다.
$$ U^T U = UU^T = I $$
$$ V^T V = VV^T = I $$
예를 들어
$$\mathbf{M} = \begin{bmatrix}
1 & 0 & 0 & 0 & 2 \
0 & 0 & 3 & 0 & 0 \
0 & 0 & 0 & 0 & 0 \
0 & 2 & 0 & 0 & 0
\end{bmatrix}
$$
에 대한 특이값 분해 결과는 다음과 같다.
$$
\begin{align}
\mathbf{U} &= \begin{bmatrix}
0 & 0 & 1 & 0 \
1 & 0 & 0 & 0 \
0 & 0 & 0 & -1 \
0 & 1 & 0 & 0 \
\end{bmatrix} \
\boldsymbol{\Sigma} &= \begin{bmatrix}
\sqrt{5} & 0 & 0 & 0 & 0 \
0 & 2 & 0 & 0 & 0 \
0 & 0 & 1 & 0 & 0 \
0 & 0 & 0 & 0 & 0
\end{bmatrix} \
\mathbf{V}^T &= \begin{bmatrix}
0 & 0 & \sqrt{0.2} & 0 & \sqrt{0.8} \
0 & 1 & 0 & 0 & 0 \
1 & 0 & 0 & 0 & 0 \
0 & 0 & -\sqrt{0.8} & 0 & \sqrt{0.2} \
0 & 0 & 0 & 1 & 0 \
\end{bmatrix}
\end{align}$$
이는 다음과 같이 확인 할 수 있다.
$$\begin{align}
\mathbf{U} \mathbf{U^T} &=
\begin{bmatrix}
0 & 0 & 1 & 0 \
1 & 0 & 0 & 0 \
0 & 0 & 0 & -1 \
0 & 1 & 0 & 0 \
\end{bmatrix}
\cdot
\begin{bmatrix}
0 & 1 & 0 & 0 \
0 & 0 & 0 & 1 \
1 & 0 & 0 & 0 \
0 & 0 & -1 & 0 \
\end{bmatrix}
=
\begin{bmatrix}
1 & 0 & 0 & 0 \
0 & 1 & 0 & 0 \
0 & 0 & 1 & 0 \
0 & 0 & 0 & 1
\end{bmatrix}
= \mathbf{I}_4 \
\mathbf{V} \mathbf{V^T} &=
\begin{bmatrix}
0 & 0 & \sqrt{0.2} & 0 & \sqrt{0.8} \
0 & 1 & 0 & 0 & 0 \
1 & 0 & 0 & 0 & 0 \
0 & 0 & -\sqrt{0.8} & 0 & \sqrt{0.2} \
0 & 0 & 0 & 1 & 0 \
\end{bmatrix}
\cdot
\begin{bmatrix}
0 & 0 & 1 & 0 & 0 \
0 & 1 & 0 & 0 & 0 \
\sqrt{0.2} & 0 & 0 & -\sqrt{0.8} & 0\
0 & 0 & 0 & 0 & 1 \
\sqrt{0.8} & 0 & 0 & \sqrt{0.2} & 0 \
\end{bmatrix}
=
\begin{bmatrix}
1 & 0 & 0 & 0 & 0 \
0 & 1 & 0 & 0 & 0 \
0 & 0 & 1 & 0 & 0 \
0 & 0 & 0 & 1 & 0 \
0 & 0 & 0 & 0 & 1
\end{bmatrix}
= \mathbf{I}_5
\end{align}$$
End of explanation
"""
|
DakotaNelson/msf-stats | Exploit Payload Sizes.ipynb | mit | %matplotlib inline
import os
import re
import sys
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# Set up a path to the Metasploit project's code.
basepath = os.path.join('/', 'home', 'dnelson', 'projects', 'msf-stats')
rootdir = os.path.join(basepath, 'metasploit-framework', 'modules', 'exploits')
rootdir
# Iterate through every exploit module, searching for the amount of space that exploit provides to fit a payload in.
all_sizes = []
for folder, subs, files in os.walk(rootdir):
for filename in files:
with open(os.path.join(folder, filename), 'r') as sploit:
#print("parsing " + filename + "...")
text = sploit.read()
# remove all whitespace
text = ''.join(text.split())
space = re.search("\'Space\'=>(\d+)\,", text)
# Note that if no payload size limit is specified, we simply ignore that module
if space:
all_sizes.append(int(space.group(1)))
print("Modules processed: " + str(len(all_sizes)))
sorted_sizes = np.sort(all_sizes)
cumulative = np.cumsum(sorted_sizes)
print(sorted_sizes)
# looks to me like we should exclude that last one, since it's waaaaaaay larger than the rest
sorted_sizes = sorted_sizes[:-1]
# Plot our sorted sizes against a fraction, 0 to 1, of all exploits.
plt.figure(figsize = (10,5))
plt.plot(sorted_sizes, np.linspace(1,0,len(sorted_sizes)), linewidth=3, color='black')
plt.xlim((0,10000))
plt.xlabel("Payload size (bytes)", size=16)
plt.ylabel("Fraction of exploits which\n accept that payload size", size=16)
# Plot some vertical lines for emphasis; a red line at 512 bytes, green at 1024, and blue at 2048.
plt.axvline(512, color='red', linewidth=2)
plt.axvline(1024, color='green', linewidth=2)
plt.axvline(2048, color='blue', linewidth=2)
plt.show()
"""
Explanation: Metasploit Payload Size
Or: "How big can my stagers be, really?"
The idea here is to parse through the Metasploit Project's available exploits to determine what the distribution of payload sizes is.
This can help make decisions for stager size optimization - if I have a great idea for a stager (or other exploit payload), but can't make it any smaller than 1k, is it worth it? What if it's 2k? And so on.
As it turns out, payloads over 2kb work with less than 20% of available exploits, and payloads over 1kb only work with about 60% - if you can't make your stager under 2k, you shouldn't expect to be able to use it very often at all.
End of explanation
"""
plt.figure(figsize = (10,5))
plt.hist(sorted_sizes, 500)
plt.xlim((0,35000))
plt.yscale('log')
plt.xlabel("Payload Size (bytes)", size=16)
plt.ylabel("Number of Exploits, Log Scale", size=16)
plt.show()
"""
Explanation: We'll start with displaying payload size against the fraction of exploits which will work (or not work) for that size. It looks like any payload over 2048 bytes will only work with about 20% of exploits - a little less, in fact! If any of your stagers are just barely above 1024 bytes, it's well worth the effort to trim those last few bytes. Almost a quarter of exploits available in Metasploit have a payload size cutoff at 1024 bytes.
This chart can also be read as a probability: if I want to send a 1000 byte payload, and I pick an exploit at random (or, I find a vulnerable host at random), I have about a 60% chance that the exploit I end up with will be able to accomodate that payload. If my payload is 500 bytes, that probability becomes more than 90%.
End of explanation
"""
sploitsByCategory = {}
for folder, subs, files in os.walk(rootdir):
for filename in files:
with open(os.path.join(folder, filename), 'r') as sploit:
#print("parsing " + filename + "...")
text = sploit.read()
# remove all whitespace
text = ''.join(text.split())
space = re.search("\'Space\'=>(\d+)\,", text)
# Note that if no payload size limit is specified, we simply ignore that module
if space:
# get the first folder in the exploits directory by
# 1. Stripping off the rootdir using [len(rootdir):]
# 2. Splitting on the folder separator (not platform-independent)
# 3. Taking the first one (the zeroth one is '' since there's always a leading /)
sploitType = folder[len(rootdir):].split('/')[1]
try:
sploitsByCategory[sploitType].append(int(space.group(1)))
except KeyError:
sploitsByCategory[sploitType] = []
sploitsByCategory[sploitType].append(int(space.group(1)))
# Yeah, that's right, a nested list comprehension. To print.
[str(sploits) + ": " + ",".join([str(num) for num in sploitsByCategory[sploits]]) for sploits in sploitsByCategory]
sortedSploits = {}
platforms = ['linux', 'windows', 'unix', 'multi']
for platform in platforms:
sortedSploits[platform] = np.sort(sploitsByCategory[platform])
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
plt.figure(figsize = (10,5))
for i, platform in enumerate(platforms):
plt.plot(sortedSploits[platform], np.linspace(1,0,len(sortedSploits[platform])), linewidth=3, color=colors[i])
plt.xlim((0,10000))
plt.xlabel("Payload size (bytes)", size=16)
plt.ylabel("Fraction of exploits which\n accept that payload size", size=16)
plt.legend([platform for platform in platforms])
plt.show()
"""
Explanation: The humble histogram finishes out our exploration today - note that it's on a log scale. This is significantly less useful than the above charts, since payload size is a cumulative number (i.e. smaller payloads still work in exploits with more than enough space for them), but this view is interesting in that it shows us where there are large clusters of exploits accepting a certain payload size.
But, what about platforms? Are our results being influenced by the category of the exploit? (Windows and Linux vs. Android and iOS, etc.)
End of explanation
"""
|
diegocavalca/Studies | programming/Python/tensorflow/exercises/Neural_Network_Part1_Solutions.ipynb | cc0-1.0 | from __future__ import print_function
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
from datetime import date
date.today()
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
tf.__version__
np.__version__
"""
Explanation: Neural Network
End of explanation
"""
_x = np.linspace(-10., 10., 1000)
x = tf.convert_to_tensor(_x)
relu = tf.nn.relu(x)
elu = tf.nn.elu(x)
softplus = tf.nn.softplus(x)
with tf.Session() as sess:
_relu, _elu, _softplus = sess.run([relu, elu, softplus])
plt.plot(_x, _relu, label='relu')
plt.plot(_x, _elu, label='elu')
plt.plot(_x, _softplus, label='softplus')
plt.legend(bbox_to_anchor=(0.5, 1.0))
plt.show()
"""
Explanation: Activation Functions
Q1. Apply relu, elu, and softplus to x.
End of explanation
"""
_x = np.linspace(-10., 10., 1000)
x = tf.convert_to_tensor(_x)
sigmoid = tf.nn.sigmoid(x)
tanh = tf.nn.tanh(x)
with tf.Session() as sess:
_sigmoid, _tanh = sess.run([sigmoid, tanh])
plt.plot(_x, _sigmoid, label='sigmoid')
plt.plot(_x, _tanh, label='tanh')
plt.legend(bbox_to_anchor=(0.5, 1.0))
plt.grid()
plt.show()
"""
Explanation: Q2. Apply sigmoid and tanh to x.
End of explanation
"""
_x = np.array([[1, 2, 4, 8], [2, 4, 6, 8]], dtype=np.float32)
x = tf.convert_to_tensor(_x)
out = tf.nn.softmax(x, dim=-1)
with tf.Session() as sess:
_out = sess.run(out)
print(_out)
assert np.allclose(np.sum(_out, axis=-1), 1)
"""
Explanation: Q3. Apply softmax to x.
End of explanation
"""
_x = np.array([[1, 2, 4, 8], [2, 4, 6, 8]], dtype=np.float32)
print("_x =\n" , _x)
x = tf.convert_to_tensor(_x)
out = tf.nn.dropout(x, keep_prob=0.5)
with tf.Session() as sess:
_out = sess.run(out)
print("_out =\n", _out)
"""
Explanation: Q4. Apply dropout with keep_prob=.5 to x.
End of explanation
"""
x = tf.random_normal([8, 10])
out = tf.contrib.layers.fully_connected(inputs=x, num_outputs=2,
activation_fn=tf.nn.sigmoid,
weights_initializer=tf.contrib.layers.xavier_initializer())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(out))
"""
Explanation: Fully Connected
Q5. Apply a fully connected layer to x with 2 outputs and then a sigmoid function.
End of explanation
"""
tf.reset_default_graph()
x = tf.random_uniform(shape=(2, 3, 3, 3), dtype=tf.float32)
filter = tf.get_variable("filter", shape=(2, 2, 3, 2), dtype=tf.float32,
initializer=tf.random_uniform_initializer())
out = tf.nn.conv2d(x, filter, strides=[1, 1, 1, 1], padding="SAME")
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
_out = sess.run(out)
print(_out.shape)
"""
Explanation: Convolution
Q6. Apply 2 kernels of width-height (2, 2), stride 1, and same padding to x.
End of explanation
"""
tf.reset_default_graph()
x = tf.random_uniform(shape=(4, 10, 10, 3), dtype=tf.float32)
filter = tf.get_variable("filter", shape=(2, 2, 3, 2), dtype=tf.float32,
initializer=tf.random_uniform_initializer())
out = tf.nn.atrous_conv2d(x, filter, padding="VALID", rate=2)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
_out = sess.run(out)
print(_out.shape)
# Do we really have to distinguish between these two functions?
# Unless you want to use stride of 2 or more,
# You can just use tf.nn.atrous_conv2d. For normal convolution, set rate 1.
"""
Explanation: Q7. Apply 3 kernels of width-height (2, 2), stride 1, dilation_rate 2 and valid padding to x.
End of explanation
"""
tf.reset_default_graph()
x = tf.random_uniform(shape=(4, 10, 10, 5), dtype=tf.float32)
filter = tf.get_variable("filter", shape=(3, 3, 5, 4), dtype=tf.float32,
initializer=tf.random_uniform_initializer())
out = tf.nn.conv2d(x, filter, strides=[1, 2, 2, 1], padding="SAME")
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
_out = sess.run(out)
print(_out.shape)
"""
Explanation: Q8. Apply 4 kernels of width-height (3, 3), stride 2, and same padding to x.
End of explanation
"""
tf.reset_default_graph()
x = tf.random_uniform(shape=(4, 10, 10, 5), dtype=tf.float32)
filter = tf.get_variable("filter", shape=(3, 3, 5, 4), dtype=tf.float32,
initializer=tf.random_uniform_initializer())
out = tf.nn.depthwise_conv2d(x, filter, strides=[1, 2, 2, 1], padding="SAME")
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
_out = sess.run(out)
print(_out.shape)
"""
Explanation: Q9. Apply 4 times of kernels of width-height (3, 3), stride 2, and same padding to x, depth-wise.
End of explanation
"""
tf.reset_default_graph()
x = tf.random_uniform(shape=(4, 10, 5), dtype=tf.float32)
filter = tf.get_variable("filter", shape=(3, 5, 5), dtype=tf.float32,
initializer=tf.random_uniform_initializer())
out = tf.nn.conv1d(x, filter, stride=2, padding="VALID")
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
_out = sess.run(out)
print(_out.shape)
"""
Explanation: Q10. Apply 5 kernels of height 3, stride 2, and valid padding to x.
End of explanation
"""
tf.reset_default_graph()
x = tf.random_uniform(shape=(4, 5, 5, 4), dtype=tf.float32)
filter = tf.get_variable("filter", shape=(3, 3, 5, 4), dtype=tf.float32,
initializer=tf.random_uniform_initializer())
shp = x.get_shape().as_list()
output_shape = [shp[0], shp[1]*2, shp[2]*2, 5]
out = tf.nn.conv2d_transpose(x, filter, strides=[1, 2, 2, 1], output_shape=output_shape, padding="SAME")
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
_out = sess.run(out)
print(_out.shape)
"""
Explanation: Q11. Apply conv2d transpose with 5 kernels of width-height (3, 3), stride 2, and same padding to x.
End of explanation
"""
tf.reset_default_graph()
x = tf.random_uniform(shape=(4, 5, 5, 4), dtype=tf.float32)
filter = tf.get_variable("filter", shape=(3, 3, 5, 4), dtype=tf.float32,
initializer=tf.random_uniform_initializer())
shp = x.get_shape().as_list()
output_shape = [shp[0], (shp[1]-1)*2+3, (shp[2]-1)*2+3, 5]
out = tf.nn.conv2d_transpose(x, filter, strides=[1, 2, 2, 1], output_shape=output_shape, padding="VALID")
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
_out = sess.run(out)
print(_out.shape)
"""
Explanation: Q12. Apply conv2d transpose with 5 kernels of width-height (3, 3), stride 2, and valid padding to x.
End of explanation
"""
_x = np.zeros((1, 3, 3, 3), dtype=np.float32)
_x[0, :, :, 0] = np.arange(1, 10, dtype=np.float32).reshape(3, 3)
_x[0, :, :, 1] = np.arange(10, 19, dtype=np.float32).reshape(3, 3)
_x[0, :, :, 2] = np.arange(19, 28, dtype=np.float32).reshape(3, 3)
print("1st channel of x =\n", _x[:, :, :, 0])
print("\n2nd channel of x =\n", _x[:, :, :, 1])
print("\n3rd channel of x =\n", _x[:, :, :, 2])
x = tf.constant(_x)
maxpool = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 1, 1, 1], padding="VALID")
avgpool = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 1, 1, 1], padding="VALID")
with tf.Session() as sess:
_maxpool, _avgpool = sess.run([maxpool, avgpool])
print("\n1st channel of max pooling =\n", _maxpool[:, :, :, 0])
print("\n2nd channel of max pooling =\n", _maxpool[:, :, :, 1])
print("\n3rd channel of max pooling =\n", _maxpool[:, :, :, 2])
print("\n1st channel of avg pooling =\n", _avgpool[:, :, :, 0])
print("\n2nd channel of avg pooling =\n", _avgpool[:, :, :, 1])
print("\n3rd channel of avg pooling =\n", _avgpool[:, :, :, 2])
"""
Explanation: Q13. Apply max pooling and average pooling of window size 2, stride 1, and valid padding to x.
End of explanation
"""
|
arcyfelix/Courses | 17-08-31-Zero-to-Deep-Learning-with-Python-and-Keras/3 Machine Learning.ipynb | apache-2.0 | %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df = pd.read_csv('./data/weight-height.csv')
df.head()
df.plot(kind = 'scatter',
figsize = (7, 7),
x = 'Height',
y = 'Weight',
title = 'Weight and Height in adults')
df.plot(kind = 'scatter',
figsize = (7, 7),
x = 'Height',
y = 'Weight',
title = 'Weight and Height in adults')
# Here we're plotting the red line 'by hand' with fixed values
# We'll try to learn this line with an algorithm below
plt.plot([55, 78], [75, 250], color='red', linewidth=3)
def line(x, w=0, b=0):
    """Evaluate the affine function w * x + b (element-wise on arrays)."""
    return b + x * w
x = np.linspace(55, 80, 100)
x
yhat = line(x, w = 0, b = 0)
yhat
df.plot(kind = 'scatter',
figsize = (7, 7),
x = 'Height',
y = 'Weight',
title = 'Weight and Height in adults')
plt.plot(x, yhat, color='red', linewidth=3)
"""
Explanation: Linear Regression
End of explanation
"""
def mean_squared_error(y_true, y_pred):
    """Mean squared error between targets and predictions.

    Generalized to accept plain Python sequences as well as numpy arrays:
    `np.asarray` coerces the inputs and `np.mean` replaces the original
    `.mean()` method call, which required array inputs.
    """
    residuals = np.asarray(y_true) - np.asarray(y_pred)
    return np.mean(residuals ** 2)
X = df[['Height']].values
y_true = df['Weight'].values
y_true
y_pred = line(X)
y_pred
mean_squared_error(y_true, y_pred.ravel())
"""
Explanation: Cost Function
End of explanation
"""
plt.figure(figsize=(10, 5))
# we are going to draw 2 plots in the same figure
# first plot, data and a few lines
ax1 = plt.subplot(121)
df.plot(kind = 'scatter',
x = 'Height',
y = 'Weight',
title = 'Weight and Height in adults', ax=ax1)
# let's explore the cost function for a few values of b between -100 and +150
bbs = np.array([-100, -50, 0, 50, 100, 150])
mses = [] # we will append the values of the cost here, for each line
for b in bbs:
y_pred = line(X, w = 2, b = b)
mse = mean_squared_error(y_true, y_pred)
mses.append(mse)
plt.plot(X, y_pred)
# second plot: Cost function
ax2 = plt.subplot(122)
plt.plot(bbs, mses, 'o-')
plt.title('Cost as a function of b')
plt.xlabel('b')
"""
Explanation: Manual exploration of different values of W and b
End of explanation
"""
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, SGD
model = Sequential()
model.add(Dense(1, input_shape=(1,)))
model.summary()
model.compile(Adam(lr = 0.8), loss = 'mean_squared_error')
model.fit(X, y_true, epochs = 40)
y_pred = model.predict(X)
df.plot(kind = 'scatter',
x = 'Height',
y = 'Weight',
title = 'Weight and Height in adults')
plt.plot(X, y_pred, color='red')
W, B = model.get_weights()
W
B
"""
Explanation: Linear Regression with Keras
End of explanation
"""
from sklearn.metrics import r2_score
print("The R2 score is {:0.3f}".format(r2_score(y_true, y_pred)))
"""
Explanation: Evaluating Model Performance
End of explanation
"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_true,
test_size=0.2)
len(X_train)
len(X_test)
W[0, 0] = 0.0
B[0] = 0.0
model.set_weights((W, B))
model.fit(X_train, y_train, epochs = 50, verbose = 0)
y_train_pred = model.predict(X_train).ravel()
y_test_pred = model.predict(X_test).ravel()
from sklearn.metrics import mean_squared_error as mse
print("The Mean Squared Error on the Train set is:\t{:0.1f}".format(mse(y_train, y_train_pred)))
print("The Mean Squared Error on the Test set is:\t{:0.1f}".format(mse(y_test, y_test_pred)))
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_test_pred)))
"""
Explanation: Train Test Split
End of explanation
"""
df = pd.read_csv('./data/user_visit_duration.csv')
df.head()
df.plot(kind = 'scatter',
x='Time (min)',
y='Buy')
model = Sequential()
model.add(Dense(1, input_shape=(1,), activation='sigmoid'))
model.compile(SGD(lr = 0.5),
loss = 'binary_crossentropy',
metrics=['accuracy'])
model.summary()
X = df[['Time (min)']].values
y = df['Buy'].values
model.fit(X, y, epochs = 25)
ax = df.plot(kind='scatter',
x = 'Time (min)',
y ='Buy',
title = 'Purchase behavior VS time spent on site')
temp = np.linspace(0, 4)
ax.plot(temp, model.predict(temp), color = 'orange')
plt.legend(['model', 'data'])
temp_class = model.predict(temp) > 0.5
ax = df.plot(kind = 'scatter',
x = 'Time (min)',
y = 'Buy',
title = 'Purchase behavior VS time spent on site')
temp = np.linspace(0, 4)
ax.plot(temp, temp_class, color = 'orange')
plt.legend(['model', 'data'])
y_pred = model.predict(X)
y_class_pred = y_pred > 0.5
from sklearn.metrics import accuracy_score
print("The accuracy score is {:0.3f}".format(accuracy_score(y, y_class_pred)))
"""
Explanation: Classification
End of explanation
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
params = model.get_weights()
params = [np.zeros(w.shape) for w in params]
model.set_weights(params)
print("The accuracy score is {:0.3f}".format(accuracy_score(y, model.predict(X) > 0.5)))
model.fit(X_train, y_train, epochs = 25, verbose = 0)
print("The train accuracy score is {:0.3f}".format(accuracy_score(y_train, model.predict(X_train) > 0.5)))
print("The test accuracy score is {:0.3f}".format(accuracy_score(y_test, model.predict(X_test) > 0.5)))
"""
Explanation: Train/Test split
End of explanation
"""
from keras.wrappers.scikit_learn import KerasClassifier
def build_logistic_regression_model():
    """Build and compile a one-unit sigmoid network (logistic regression).

    The model maps a single scalar feature to a probability; it is trained
    with SGD (lr=0.5) on binary cross-entropy and reports accuracy.
    """
    lr_model = Sequential()
    lr_model.add(Dense(1, input_shape=(1,), activation='sigmoid'))
    lr_model.compile(SGD(lr=0.5),
                     loss='binary_crossentropy',
                     metrics=['accuracy'])
    return lr_model
model = KerasClassifier(build_fn = build_logistic_regression_model,
epochs = 25,
verbose = 0)
from sklearn.model_selection import cross_val_score, KFold
cv = KFold(3, shuffle = True)
scores = cross_val_score(model, X, y, cv = cv)
scores
print("The cross validation accuracy is {:0.4f} ± {:0.4f}".format(scores.mean(), scores.std()))
"""
Explanation: Cross Validation
End of explanation
"""
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_class_pred)
def pretty_confusion_matrix(y_true, y_pred, labels = ("False", "True")):
    """Return sklearn's confusion matrix as a labelled DataFrame.

    labels: class names in label order; rows keep them as-is and the
    columns get a 'Predicted ' prefix.  The default is a tuple instead of
    the original list literal to avoid the mutable-default-argument pitfall.
    """
    cm = confusion_matrix(y_true, y_pred)
    pred_labels = ['Predicted ' + l for l in labels]
    df = pd.DataFrame(cm, index = list(labels), columns = pred_labels)
    return df
pretty_confusion_matrix(y, y_class_pred, ['Not Buy', 'Buy'])
from sklearn.metrics import precision_score, recall_score, f1_score
print("Precision:\t{:0.3f}".format(precision_score(y, y_class_pred)))
print("Recall: \t{:0.3f}".format(recall_score(y, y_class_pred)))
print("F1 Score:\t{:0.3f}".format(f1_score(y, y_class_pred)))
from sklearn.metrics import classification_report
print(classification_report(y, y_class_pred))
"""
Explanation: Confusion Matrix
End of explanation
"""
df = pd.read_csv('./data/weight-height.csv')
df.head()
df['Gender'].unique()
pd.get_dummies(df['Gender'], prefix = 'Gender').head()
"""
Explanation: Feature Preprocessing
Categorical Features
End of explanation
"""
df['Height (feet)'] = df['Height']/12.0
df['Weight (100 lbs)'] = df['Weight']/100.0
df.describe().round(2)
"""
Explanation: Feature Transformations
1) Rescale with fixed factor
End of explanation
"""
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
df['Weight_mms'] = mms.fit_transform(df[['Weight']])
df['Height_mms'] = mms.fit_transform(df[['Height']])
df.describe().round(2)
"""
Explanation: MinMax normalization
End of explanation
"""
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
df['Weight_ss'] = ss.fit_transform(df[['Weight']])
df['Height_ss'] = ss.fit_transform(df[['Height']])
df.describe().round(2)
plt.figure(figsize=(15, 5))
for i, feature in enumerate(['Height', 'Height (feet)', 'Height_mms', 'Height_ss']):
plt.subplot(1, 4, i+1)
df[feature].plot(kind = 'hist',
title = feature)
plt.xlabel(feature)
"""
Explanation: 3) Standard normalization
End of explanation
"""
ex1 = pd.read_csv('./data/housing-data.csv')
ex1.head()
ex1.shape
plt.figure(figsize=(20, 5))
for i, feature in enumerate(ex1.columns):
plt.subplot(1, 4, i + 1)
ex1[feature].plot(kind = 'hist',
title = feature)
plt.xlabel(feature)
plt.tight_layout()
X = ex1[['sqft', 'bdrms', 'age']]
X.head()
Y = ex1[['price']]
Y.head()
X = X.values
Y = Y.values
X.shape
Y.shape
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD, Adam
model = Sequential()
model.add(Dense(1, input_shape = (3, )))
opt = Adam(lr = 0.8)
model.compile(optimizer = opt, loss = 'mean_squared_error')
model.summary()
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2)
model.fit(X_train, Y_train, epochs = 10)
from sklearn.metrics import r2_score
Y_train_predicted = model.predict(X_train)
Y_test_predicted = model.predict(X_test)
train_score = r2_score(Y_train, Y_train_predicted)
test_score = r2_score(Y_test, Y_test_predicted)
print('Train set score: \t{:0.3f}'.format(train_score))
print('Test set score: \t{:0.3f}'.format(test_score))
"""
Explanation: Machine Learning Exercises
Exercise 1
You've just been hired at a real estate investment firm and they would like you to build a model for pricing houses. You are given a dataset that contains data for house prices and a few features like number of bedrooms, size in square feet and age of the house. Let's see if you can build a model that is able to predict the price. In this exercise we extend what we have learned about linear regression to a dataset with more than one feature. Here are the steps to complete it:
Load the dataset ../data/housing-data.csv
plot the histograms for each feature
create 2 variables called X and y: X shall be a matrix with 3 columns (sqft,bdrms,age) and y shall be a vector with 1 column (price)
create a linear regression model in Keras with the appropriate number of inputs and output
split the data into train and test with a 20% test size
train the model on the training set and check its accuracy on training and test set
how's your model doing? Is the loss growing smaller?
try to improve your model with these experiments:
normalize the input features with one of the rescaling techniques mentioned above
use a different value for the learning rate of your model
use a different optimizer
once you're satisfied with training, check the R2 score on the test set
End of explanation
"""
from sklearn.preprocessing import MinMaxScaler
minmax = MinMaxScaler()
X = minmax.fit_transform(X)
Y = minmax.fit_transform(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2)
model = Sequential()
model.add(Dense(1, input_shape = (3, )))
opt = Adam(lr = 0.8)
model.compile(optimizer = opt, loss = 'mean_squared_error')
model.fit(X_train, Y_train, epochs = 20)
Y_train_predicted = model.predict(X_train)
Y_test_predicted = model.predict(X_test)
train_score = r2_score(Y_train, Y_train_predicted)
test_score = r2_score(Y_test, Y_test_predicted)
print('Train set score: \t{:0.3f}'.format(train_score))
print('Test set score: \t{:0.3f}'.format(test_score))
"""
Explanation: MinMaxScaler
End of explanation
"""
model = Sequential()
model.add(Dense(1, input_shape = (3, )))
opt = Adam(lr = 0.1)
model.compile(optimizer = opt, loss = 'mean_squared_error')
model.fit(X_train, Y_train, epochs = 20, verbose = 1)
Y_train_predicted = model.predict(X_train)
Y_test_predicted = model.predict(X_test)
train_score = r2_score(Y_train, Y_train_predicted)
test_score = r2_score(Y_test, Y_test_predicted)
print('Train set score: \t{:0.3f}'.format(train_score))
print('Test set score: \t{:0.3f}'.format(test_score))
"""
Explanation: Lowering the learning rate
End of explanation
"""
model = Sequential()
model.add(Dense(1, input_shape = (3, )))
opt = SGD(lr = 0.1)
model.compile(optimizer = opt, loss = 'mean_squared_error')
model.fit(X_train, Y_train, epochs = 20)
Y_train_predicted = model.predict(X_train)
Y_test_predicted = model.predict(X_test)
train_score = r2_score(Y_train, Y_train_predicted)
test_score = r2_score(Y_test, Y_test_predicted)
print('Train set score: \t{:0.3f}'.format(train_score))
print('Test set score: \t{:0.3f}'.format(test_score))
"""
Explanation: Using SGD
End of explanation
"""
ex2 = pd.read_csv('./data/HR_comma_sep.csv')
ex2.head()
ex2.info()
ex2.describe()
# Accuracy if predicted that all stay
# left = 1 : the employee already left
acc = (1 - (ex2['left'].value_counts()[1] / ex2.shape[0])) * 100
print('Predicting all would stay yields accuracy: \t{:0.2f} %'.format(acc))
len(ex2.columns)
ex2['average_montly_hours'].plot(kind = 'hist',
figsize = (10, 5))
ex2['time_spend_company'].plot(kind = 'hist',
figsize = (10, 5))
# Squash the two wide-range features into [0, 1] so they are comparable
# with the other inputs.
minmax = MinMaxScaler()
# Series.reshape was deprecated and later removed from pandas; selecting
# the column with double brackets yields the 2-D frame fit_transform
# expects (the same df[['Weight']] pattern used earlier in this notebook).
ex2['average_montly_hours'] = minmax.fit_transform(ex2[['average_montly_hours']])
ex2['time_spend_company'] = minmax.fit_transform(ex2[['time_spend_company']])
ex2['average_montly_hours'].plot(kind = 'hist',
figsize = (10, 5))
# Sales and salary are categorical data
ex2_dummies = pd.get_dummies(ex2[['sales', 'salary']])
ex2_dummies.head()
del ex2['sales'], ex2['salary']
ex2.head()
len(ex2.columns)
data = pd.concat([ex2, ex2_dummies], axis = 1)
data.head()
# pandas removed DataFrame.ix in 1.0; .loc supports the same boolean-mask
# column selection: keep every feature column except the target 'left'.
X = data.loc[:, data.columns != 'left']
X.head()
len(X.columns)
Y = data['left']
from sklearn.model_selection import train_test_split
X = X.values
Y = Y.values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2)
model = Sequential()
model.add(Dense(1, input_dim = X_train.shape[1], activation = 'sigmoid'))
model.compile(optimizer = Adam(lr = 0.05), loss = 'binary_crossentropy', metrics = ['accuracy'])
model.summary()
model.fit(X_train, Y_train, epochs = 20)
Y_test_predicted = model.predict_classes(X_test)
from sklearn.metrics import confusion_matrix, classification_report
def pretty_confusion_matrix(y_true, y_pred, labels = ("False", "True")):
    """Return sklearn's confusion matrix as a labelled DataFrame.

    labels: class names in label order; rows keep them as-is and the
    columns get a 'Predicted ' prefix.  The default is a tuple instead of
    the original list literal to avoid the mutable-default-argument pitfall.
    """
    cm = confusion_matrix(y_true, y_pred)
    pred_labels = ['Predicted ' + l for l in labels]
    df = pd.DataFrame(cm, index = list(labels), columns = pred_labels)
    return df
pretty_confusion_matrix(Y_test, Y_test_predicted, labels=['Stay', 'Leave'])
print(classification_report(Y_test, Y_test_predicted))
from sklearn.model_selection import cross_val_score, KFold
from keras.wrappers.scikit_learn import KerasClassifier
def build_model():
    """Factory for KerasClassifier: one sigmoid unit over the 20
    engineered HR features, compiled with Adam (lr=0.1) on binary
    cross-entropy and tracking accuracy."""
    net = Sequential()
    net.add(Dense(1, input_dim=20, activation='sigmoid'))
    net.compile(Adam(lr=0.1), 'binary_crossentropy', metrics=['accuracy'])
    return net
model = KerasClassifier(build_fn = build_model,
epochs = 10,
verbose = 0)
from sklearn.model_selection import KFold, cross_val_score
cross_val = KFold(5, shuffle = True)
scores = cross_val_score(model, X, Y, cv = cross_val)
print("The cross validation accuracy is {:0.4f} ± {:0.4f}".format(scores.mean(), scores.std()))
scores
"""
Explanation: Exercise 2
Your boss was extremely happy with your work on the housing price prediction model and decided to entrust you with a more challenging task. They've seen a lot of people leave the company recently and they would like to understand why that's happening. They have collected historical data on employees and they would like you to build a model that is able to predict which employee will leave next. They would like a model that is better than random guessing. They also prefer false negatives to false positives, in this first phase. Fields in the dataset include:
Employee satisfaction level
Last evaluation
Number of projects
Average monthly hours
Time spent at the company
Whether they have had a work accident
Whether they have had a promotion in the last 5 years
Department
Salary
Whether the employee has left
Your goal is to predict the binary outcome variable left using the rest of the data. Since the outcome is binary, this is a classification problem. Here are some things you may want to try out:
load the dataset at ../data/HR_comma_sep.csv, inspect it with .head(), .info() and .describe().
Establish a benchmark: what would be your accuracy score if you predicted everyone stay?
Check if any feature needs rescaling. You may plot a histogram of the feature to decide which rescaling method is more appropriate.
convert the categorical features into binary dummy columns. You will then have to combine them with the numerical features using pd.concat.
do the usual train/test split with a 20% test size
play around with learning rate and optimizer
check the confusion matrix, precision and recall
check if you still get the same results if you use a 5-Fold cross validation on all the data
Is the model good enough for your boss?
As you will see in this exercise, a logistic regression model is not good enough to help your boss. In the next chapter we will learn how to go beyond linear models.
This dataset comes from https://www.kaggle.com/ludobenistant/hr-analytics/ and is released under CC BY-SA 4.0 License.
End of explanation
"""
|
stonebig/winpython_afterdoc | docs/Winpython_checker.ipynb | mit | import warnings
#warnings.filterwarnings("ignore", category=DeprecationWarning)
#warnings.filterwarnings("ignore", category=UserWarning)
#warnings.filterwarnings("ignore", category=FutureWarning)
# warnings.filterwarnings("ignore") # would silence all warnings
%matplotlib inline
# use %matplotlib widget for the adventurous
"""
Explanation: Winpython Default checker
End of explanation
"""
# checking Numba JIT toolchain
import numpy as np
image = np.zeros((1024, 1536), dtype = np.uint8)
#from pylab import imshow, show
import matplotlib.pyplot as plt
from timeit import default_timer as timer
from numba import jit
@jit
def create_fractal(min_x, max_x, min_y, max_y, image, iters, mandelx):
    """Render an escape-time fractal into `image` (mutated in place).

    Each pixel is coloured with mandelx(re, im, iters) evaluated over the
    complex rectangle [min_x, max_x] x [min_y, max_y].
    """
    n_rows = image.shape[0]
    n_cols = image.shape[1]
    step_x = (max_x - min_x) / n_cols
    step_y = (max_y - min_y) / n_rows
    for col in range(n_cols):
        re_part = min_x + col * step_x
        for row in range(n_rows):
            im_part = min_y + row * step_y
            image[row, col] = mandelx(re_part, im_part, iters)
@jit
def mandel(x, y, max_iters):
    """Return the escape iteration of c = x + yi under z -> z*z + c,
    or max_iters if |z| stays below 2 for the whole orbit."""
    c = complex(x, y)
    z = 0.0j
    for step in range(max_iters):
        z = z * z + c
        if z.real * z.real + z.imag * z.imag >= 4:
            return step
    return max_iters
# Numba speed
start = timer()
create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20 , mandel)
dt = timer() - start
fig = plt.figure()
print ("Mandelbrot created by numba in %f s" % dt)
plt.imshow(image)
plt.show()
"""
Explanation: Compilers: Numba and Cython
Requirement
To get Cython working, Winpython 3.7+ users should install "Microsoft Visual C++ Build Tools 2017" (visualcppbuildtools_full.exe, a 4 GB installation) at https://beta.visualstudio.com/download-visual-studio-vs/
To get Numba working, not-windows10 users may have to install "Microsoft Visual C++ Redistributable pour Visual Studio 2017" (vc_redist) at https://beta.visualstudio.com/download-visual-studio-vs/
Thanks to recent progress, Visual Studio 2017/2018/2019 are cross-compatible now
Compiler toolchains
Numba (a JIT Compiler)
End of explanation
"""
# Cython + Mingwpy compiler toolchain test
%load_ext Cython
%%cython -a
# with %%cython -a , full C-speed lines are shown in white, slowest python-speed lines are shown in dark yellow lines
# ==> put your cython rewrite effort on dark yellow lines
def create_fractal_cython(min_x, max_x, min_y, max_y, image, iters, mandelx):
    # Escape-time renderer: colours every pixel of `image` (in place) with
    # mandelx() evaluated on the complex rectangle spanned by the bounds.
    n_rows = image.shape[0]
    n_cols = image.shape[1]
    dx = (max_x - min_x) / n_cols
    dy = (max_y - min_y) / n_rows
    for col in range(n_cols):
        re_part = min_x + col * dx
        for row in range(n_rows):
            im_part = min_y + row * dy
            image[row, col] = mandelx(re_part, im_part, iters)
def mandel_cython(x, y, max_iters):
    # Escape-time count for c = x + yi under z -> z**2 + c.
    # The cdef static C typing below is what gives Cython its speedup here.
    cdef int i
    cdef double cx, cy , zx, zy
    cx , cy = x, y
    zx , zy =0 ,0
    for i in range(max_iters):
        # Complex squaring written out on (zx, zy) so the arithmetic stays
        # in plain C doubles instead of Python complex objects.
        zx , zy = zx*zx - zy*zy + cx , zx*zy*2 + cy
        if (zx*zx + zy*zy) >= 4:
            # |z| >= 2: the orbit escapes; the iteration index is the colour.
            return i
    return max_iters
#Cython speed
start = timer()
create_fractal_cython(-2.0, 1.0, -1.0, 1.0, image, 20 , mandel_cython)
dt = timer() - start
fig = plt.figure()
print ("Mandelbrot created by cython in %f s" % dt)
plt.imshow(image)
"""
Explanation: Cython (a compiler for writing C extensions for the Python language)
WinPython 3.5 and 3.6 users may not have mingwpy available, and so need "VisualStudio C++ Community Edition 2015" https://www.visualstudio.com/downloads/download-visual-studio-vs#d-visual-c
End of explanation
"""
# Matplotlib 3.4.1
# for more examples, see: http://matplotlib.org/gallery.html
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
ax = plt.figure().add_subplot(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
# Plot the 3D surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)
# Plot projections of the contours for each dimension. By choosing offsets
# that match the appropriate axes limits, the projected contours will sit on
# the 'walls' of the graph
cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlim(-40, 40)
ax.set_ylim(-40, 40)
ax.set_zlim(-100, 100)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
# Seaborn
# for more examples, see http://stanford.edu/~mwaskom/software/seaborn/examples/index.html
import seaborn as sns
sns.set()
df = sns.load_dataset("iris")
sns.pairplot(df, hue="species", height=1.5)
# altair-example
import altair as alt
alt.Chart(df).mark_bar().encode(
x=alt.X('sepal_length', bin=alt.Bin(maxbins=50)),
y='count(*):Q',
color='species:N',
#column='species',
).interactive()
# temporary warning removal
import warnings
import matplotlib as mpl
warnings.filterwarnings("ignore", category=mpl.cbook.MatplotlibDeprecationWarning)
# Holoviews
# for more example, see http://holoviews.org/Tutorials/index.html
import numpy as np
import holoviews as hv
hv.extension('matplotlib')
dots = np.linspace(-0.45, 0.45, 11)
fractal = hv.Image(image)
layouts = {y: (fractal * hv.Points(fractal.sample([(i,y) for i in dots])) +
fractal.sample(y=y) )
for y in np.linspace(0, 0.45,11)}
hv.HoloMap(layouts, kdims=['Y']).collate().cols(2)
# Bokeh 0.12.5
import numpy as np
from six.moves import zip
from bokeh.plotting import figure, show, output_notebook
N = 4000
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = ["#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)]
output_notebook()
TOOLS="hover,crosshair,pan,wheel_zoom,box_zoom,reset,tap,save,box_select,poly_select,lasso_select"
p = figure(tools=TOOLS)
p.scatter(x,y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)
show(p)
# Datashader (holoviews+Bokeh)
import datashader as ds
import numpy as np
import holoviews as hv
from holoviews import opts
from holoviews.operation.datashader import datashade, shade, dynspread, spread, rasterize
from holoviews.operation import decimate
hv.extension('bokeh')
decimate.max_samples=1000
dynspread.max_px=20
dynspread.threshold=0.5
def random_walk(n, f=5000):
    """Smoothed 2-D random walk: Gaussian steps convolved with a length-f
    box filter, cumulated, with a slow sine wobble and measurement noise
    added on top.  Returns an (n - 1 + f, 2) array of (x, y) samples."""
    kernel = np.ones(f) / f
    xs = np.convolve(np.random.normal(0, 0.1, size=n), kernel).cumsum()
    ys = np.convolve(np.random.normal(0, 0.1, size=n), kernel).cumsum()
    wobble = 0.1 * np.sin(0.1 * np.array(range(n - 1 + f)))
    xs = xs + wobble
    xs = xs + np.random.normal(0, 0.005, size=n - 1 + f)
    ys = ys + np.random.normal(0, 0.005, size=n - 1 + f)
    return np.column_stack([xs, ys])
def random_cov():
    """Random symmetric positive semi-definite 2x2 matrix (A @ A.T),
    usable as a covariance for 2-D Gaussian sampling."""
    basis = np.random.randn(2, 2)
    return basis @ basis.T
np.random.seed(1)
points = hv.Points(np.random.multivariate_normal((0,0), [[0.1, 0.1], [0.1, 1.0]], (50000,)),label="Points")
paths = hv.Path([0.15*random_walk(10000) for i in range(10)], kdims=["u","v"], label="Paths")
decimate(points) + rasterize(points) + rasterize(paths)
ropts = dict(colorbar=True, tools=["hover"], width=350)
rasterize( points).opts(cmap="kbc_r", cnorm="linear").relabel('rasterize()').opts(**ropts).hist() + \
dynspread(datashade( points, cmap="kbc_r", cnorm="linear").relabel("datashade()"))
#bqplot
from IPython.display import display
from bqplot import (Figure, Map, Mercator, Orthographic, ColorScale, ColorAxis,
AlbersUSA, topo_load, Tooltip)
def_tt = Tooltip(fields=['id', 'name'])
map_mark = Map(scales={'projection': Mercator()}, tooltip=def_tt)
map_mark.interactions = {'click': 'select', 'hover': 'tooltip'}
fig = Figure(marks=[map_mark], title='Interactions Example')
display(fig)
# ipyleaflet (javascript library usage)
from ipyleaflet import (
Map, Marker, TileLayer, ImageOverlay, Polyline, Polygon,
Rectangle, Circle, CircleMarker, GeoJSON, DrawControl
)
from traitlets import link
center = [34.6252978589571, -77.34580993652344]
m = Map(center=[34.6252978589571, -77.34580993652344], zoom=10)
dc = DrawControl()
def handle_draw(self, action, geo_json):
    """Callback for DrawControl.on_draw: echo the event type and its GeoJSON payload."""
    for event_part in (action, geo_json):
        print(event_part)
m
m
dc.on_draw(handle_draw)
m.add_control(dc)
%matplotlib widget
# Testing matplotlib interactions with a simple plot
import matplotlib.pyplot as plt
import numpy as np
# warning: if %matplotlib inline was run earlier, you need to run %matplotlib widget a second time
%matplotlib widget
fig = plt.figure() #plt.figure(1)
plt.plot(np.sin(np.linspace(0, 20, 100)))
plt.show()
# plotnine: giving a taste of the ggplot grammar from the R language (formerly we were using ggpy)
from plotnine import ggplot, aes, geom_blank, geom_point, stat_smooth, facet_wrap, theme_bw
from plotnine.data import mtcars
ggplot(mtcars, aes(x='hp', y='wt', color='mpg')) + geom_point() +\
facet_wrap("~cyl") + theme_bw()
"""
Explanation: Graphics: Matplotlib, Pandas, Seaborn, Holoviews, Bokeh, bqplot, ipyleaflet, plotnine
End of explanation
"""
import IPython;IPython.__version__
# Audio Example : https://github.com/ipython/ipywidgets/blob/master/examples/Beat%20Frequencies.ipynb
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interactive
from IPython.display import Audio, display
def beat_freq(f1=220.0, f2=224.0):
    """Play and plot the superposition of two sine tones to demonstrate beats.

    The audible beat frequency is |f1 - f2|; the combined signal is rendered
    as an Audio widget and (best-effort) plotted with matplotlib.
    """
    max_time = 3   # seconds of audio
    rate = 8000    # samples per second
    times = np.linspace(0, max_time, rate * max_time)
    signal = np.sin(2 * np.pi * f1 * times) + np.sin(2 * np.pi * f2 * times)
    print(f1, f2, abs(f1 - f2))
    display(Audio(data=signal, rate=rate))
    try:
        plt.plot(signal)  # plotting is optional; ignore backend failures
    except:
        pass
    return signal
v = interactive(beat_freq, f1=(200.0,300.0), f2=(200.0,300.0))
display(v)
# Networks graph Example : https://github.com/ipython/ipywidgets/blob/master/examples/Exploring%20Graphs.ipynb
%matplotlib inline
from ipywidgets import interact
import matplotlib.pyplot as plt
import networkx as nx
# wrap a few graph generation functions so they have the same signature
# (n, m, k, p): this lets one @interact callback switch between them;
# each wrapper simply ignores the parameters its generator does not use.
def random_lobster(n, m, k, p):
    """Random lobster graph on ~n nodes; uses p and p/m, ignores k."""
    return nx.random_lobster(n, p, p / m)
def powerlaw_cluster(n, m, k, p):
    """Powerlaw-cluster graph: n nodes, m edges per new node, triangle prob p; ignores k."""
    return nx.powerlaw_cluster_graph(n, m, p)
def erdos_renyi(n, m, k, p):
    """Erdős–Rényi G(n, p) graph; ignores m and k."""
    return nx.erdos_renyi_graph(n, p)
def newman_watts_strogatz(n, m, k, p):
    """Newman–Watts–Strogatz small-world graph with k nearest neighbours; ignores m."""
    return nx.newman_watts_strogatz_graph(n, k, p)
# ipywidgets builds sliders from the (min, max[, step]) tuples and a dropdown
# from the generator dict; the function is re-run on every widget change.
@interact(n=(2,30), m=(1,10), k=(1,10), p=(0.0, 1.0, 0.001),
          generator={'lobster': random_lobster,
                     'power law': powerlaw_cluster,
                     'Newman-Watts-Strogatz': newman_watts_strogatz,
                     u'Erdős-Rényi': erdos_renyi,
                     })
def plot_random_graph(n, m, k, p, generator):
    """Draw the random graph produced by the selected generator."""
    g = generator(n, m, k, p)
    nx.draw(g)
    plt.title(generator.__name__)
    plt.show()
"""
Explanation: Ipython Notebook: Interactivity & other
End of explanation
"""
# checking statsmodels
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import statsmodels.api as sm
data = sm.datasets.anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
plt.rcParams['figure.figsize'] = (6.0, 4.0) # make plot larger in notebook
age = [data.exog['age'][data.endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30}
sm.graphics.beanplot(age, ax=ax, labels=labels,
plot_opts=plot_opts)
ax.set_xlabel("Party identification of respondent")
ax.set_ylabel("Age")
plt.show()
# lmfit test (from http://nbviewer.ipython.org/github/lmfit/lmfit-py/blob/master/examples/lmfit-model.ipynb)
import numpy as np
import matplotlib.pyplot as plt
def decay(t, N, tau):
    """Exponential decay with initial amplitude N and time constant tau."""
    return np.exp(-t / tau) * N
t = np.linspace(0, 5, num=1000)
data = decay(t, 7, 3) + np.random.randn(*t.shape)
from lmfit import Model
model = Model(decay, independent_vars=['t'])
result = model.fit(data, t=t, N=10, tau=1)
fig = plt.figure() # necessary to separate from the previous plot when using %matplotlib widget
plt.plot(t, data) # data
plt.plot(t, decay(t=t, **result.values), color='orange', linewidth=5) # best-fit model
"""
Explanation: Mathematical: statsmodels, lmfit,
End of explanation
"""
#Pandas
import pandas as pd
import numpy as np
idx = pd.date_range('2000', '2005', freq='d', closed='left')
datas = pd.DataFrame({'Color': [ 'green' if x> 1 else 'red' for x in np.random.randn(len(idx))],
'Measure': np.random.randn(len(idx)), 'Year': idx.year},
index=idx.date)
datas.head()
"""
Explanation: DataFrames: Pandas, Dask
End of explanation
"""
datas.query('Measure > 0').groupby(['Color','Year']).size().unstack()
"""
Explanation: Split / Apply / Combine
Split your data into multiple independent groups.
Apply some function to each group.
Combine your groups back into a single data object.
End of explanation
"""
# checking Web Scraping: beautifulsoup and requests
import requests
from bs4 import BeautifulSoup
URL = 'http://en.wikipedia.org/wiki/Franklin,_Tennessee'
req = requests.get(URL, headers={'User-Agent' : "Mining the Social Web"})
soup = BeautifulSoup(req.text, "lxml")
geoTag = soup.find(True, 'geo')
if geoTag and len(geoTag) > 1:
lat = geoTag.find(True, 'latitude').string
lon = geoTag.find(True, 'longitude').string
print ('Location is at', lat, lon)
elif geoTag and len(geoTag) == 1:
(lat, lon) = geoTag.string.split(';')
(lat, lon) = (lat.strip(), lon.strip())
print ('Location is at', lat, lon)
else:
print ('No location found')
"""
Explanation: Web Scraping: Beautifulsoup
End of explanation
"""
# Pulp example : minimizing the weight to carry 99 pennies
# (from Philip I Thomas)
# see https://www.youtube.com/watch?v=UmMn-N5w-lI#t=995
# Import PuLP modeler functions
from pulp import *
# The prob variable is created to contain the problem data
prob = LpProblem("99_pennies_Problem",LpMinimize)
# Variables represent how many of each coin we want to carry
pennies = LpVariable("Number_of_pennies",0,None,LpInteger)
nickels = LpVariable("Number_of_nickels",0,None,LpInteger)
dimes = LpVariable("Number_of_dimes",0,None,LpInteger)
quarters = LpVariable("Number_of_quarters",0,None,LpInteger)
# The objective function is added to 'prob' first
# we want to minimize (LpMinimize) this
prob += 2.5 * pennies + 5 * nickels + 2.268 * dimes + 5.670 * quarters, "Total_coins_Weight"
# We want exactly 99 cents
prob += 1 * pennies + 5 * nickels + 10 * dimes + 25 * quarters == 99, ""
# The problem data is written to an .lp file
prob.writeLP("99cents.lp")
prob.solve()
# print ("status",LpStatus[prob.status] )
print ("Minimal Weight to carry exactly 99 pennies is %s grams" % value(prob.objective))
# Each of the variables is printed with it's resolved optimum value
for v in prob.variables():
print (v.name, "=", v.varValue)
"""
Explanation: Operations Research: Pulp
End of explanation
"""
# checking sympy
import sympy
a, b =sympy.symbols('a b')
e=(a+b)**5
e.expand()
"""
Explanation: Deep Learning: see tutorial-first-neural-network-python-keras
Symbolic Calculation: sympy
End of explanation
"""
# checking Ipython-sql, sqlparse, SQLalchemy
%load_ext sql
%%sql sqlite:///.baresql.db
DROP TABLE IF EXISTS writer;
CREATE TABLE writer (first_name, last_name, year_of_death);
INSERT INTO writer VALUES ('William', 'Shakespeare', 1616);
INSERT INTO writer VALUES ('Bertold', 'Brecht', 1956);
SELECT * , sqlite_version() as sqlite_version from Writer order by Year_of_death
# checking baresql
from __future__ import print_function, unicode_literals, division # line needed only if Python2.7
from baresql import baresql
bsql = baresql.baresql(connection="sqlite:///.baresql.db")
bsqldf = lambda q: bsql.df(q, dict(globals(),**locals()))
users = ['Alexander', 'Billy', 'Charles', 'Danielle', 'Esmeralda', 'Franz', 'Greg']
# We use the python 'users' list like a SQL table
sql = "select 'Welcome ' || c0 || ' !' as say_hello, length(c0) as name_length from users$$ where c0 like '%a%' "
bsqldf(sql)
# Transferring data to sqlite, doing the transformation in SQL, then going back to Pandas and Matplotlib
bsqldf('''
select Color, Year, count(*) as size
from datas$$
where Measure > 0
group by Color, Year'''
).set_index(['Year', 'Color']).unstack().plot(kind='bar')
# checking db.py
from db import DB
db=DB(dbtype="sqlite", filename=".baresql.db")
db.query("select sqlite_version() as sqlite_version ;")
db.tables
# checking sqlite_bro: this should launch a separate non-browser window with sqlite_bro's welcome
!cmd start cmd /C sqlite_bro
# pyodbc or pypyodbc or ceODBC
try:
import pyodbc
except ImportError:
import pypyodbc as pyodbc # on PyPy, there is no pyodbc currently
# look for pyodbc providers
sources = pyodbc.dataSources()
dsns = list(sources.keys())
sl = [' %s [%s]' % (dsn, sources[dsn]) for dsn in dsns]
print("pyodbc Providers: (beware 32/64 bit driver and python version must match)\n", '\n'.join(sl))
# pythonnet
import clr
clr.AddReference("System.Data")
clr.AddReference('System.Data.Common')
import System.Data.OleDb as ADONET
import System.Data.Odbc as ODBCNET
import System.Data.Common as DATACOM
table = DATACOM.DbProviderFactories.GetFactoryClasses()
print("\n .NET Providers: (beware 32/64 bit driver and python version must match)")
for row in table.Rows:
print(" %s" % row[table.Columns[0]])
print(" ",[row[column] for column in table.Columns if column != table.Columns[0]])
"""
Explanation: SQL tools: sqlite, Ipython-sql, sqlite_bro, baresql, db.py
End of explanation
"""
# optional scipy full test (takes up to 10 minutes)
#!cmd /C start cmd /k python.exe -c "import scipy;scipy.test()"
%pip list
!jupyter labextension list
!pip check
!pipdeptree
!pipdeptree -p pip
"""
Explanation: Qt libraries Demo
See Dedicated Qt Libraries Demo
Wrap-up
End of explanation
"""
|
ProfessorKazarinoff/staticsite | content/code/matplotlib_plots/stress_strain_curves/stress_strain_curve_with_python.ipynb | gpl-3.0 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
print("NumPy version:",np.__version__)
print("Pandas version:",pd.__version__)
"""
Explanation: In this post, we'll use data from a tensile test to build a stress strain curve with Python and Matplotlib.
A tensile test is a type of mechanical test performed by engineers to determine the mechanical properties of a material. Engineering metal alloys such as steel and aluminum alloys are tensile tested in order to determine their strength and stiffness. Tensile tests are performed in a piece of equipment called a mechanical test frame.
After a tensile test is complete, a set of data is produced by the mechanical test frame. Using the data acquired during a tensile test, a stress-strain curve can be produced.
In this post, we will create a stress-strain curve (a plot) from a set of tensile test data of a steel 1045 sample and an aluminum 6061 sample. The stress strain curve we construct will have the following features:
A descriptive title
Axes labels with units
Two lines on the same plot. One line for steel 1045 and one line for aluminum 6061
A legend
Install Python
We are going to build our stress strain curve with Python and a Jupyter notebook. I suggest engineers and problem-solvers download and install the Anaconda distribution of Python. See this post to learn how to install Anaconda on your computer. Alternatively, you can download Python from Python.org or download Python from the Microsoft Store.
Install Jupyter, NumPy, Pandas, and Matplotlib
Once Python is installed, the next thing we need to do is install a couple of Python packages. If you are using the Anaconda distribution of Python, the packages we are going to use to build the plot: Jupyter, NumPy, Pandas, and Matplotlib come pre-installed and no additional installation steps are necessary.
However, if you downloaded Python from Python.org or installed Python using the Microsoft Store, you will need to install Jupyter, NumPy, Pandas, and Matplotlib separately. You can install Jupyter, NumPy, Pandas, and Matplotlib with pip (the Python package manager) or install these four packages with the Anaconda Prompt.
If you are using a terminal and pip, type:
```text
pip install jupyter numpy pandas matplotlib
```
If you have Anaconda installed and use the Anaconda Prompt, type:
```text
conda install jupyter numpy pandas matplotlib
```
Open a Jupyter notebook
We will construct our stress strain curve using a Jupyter notebook. See this post to see how to open a Jupyter notebook.
Make sure to save your Jupyter notebook with a recognizable name.
Download the data and move the data into the same folder as the Jupyter notebook
Next, we need to download the two data files that we will use to build our stress-strain curve. You can download sample data using the links below:
steel1045.xls
aluminum6061.xls
After these .xls files are downloaded, both .xls files need to be moved into the same folder as our Jupyter notebook.
Import NumPy, Pandas, and Matplotlib
Now that our Jupyter notebook is open and the two .xls data files are in the same folder as the Jupyter notebook, we can start coding and build our plot.
At the top of the Jupyter notebook, import NumPy, Pandas and Matplotlib. The command %matplotlib inline is included so that our plot will display directly inside our Jupyter notebook. If you are using a .py file instead of a Jupyter notebook, make sure to comment out %matplotlib inline as this line is not valid Python code.
We will also print out the versions of our NumPy and Pandas packages using the .__version__ attribute. If the versions of NumPy and Pandas prints out, that means that NumPy and Pandas are installed and we can use these packages in our code.
End of explanation
"""
%ls
"""
Explanation: Ensure the two .xls data files are in the same folder as the Jupyter notebook
Before we proceed, let's make sure the two .xls data files are in the same folder as our running Jupyter notebook. We'll use a Jupyter notebook magic command to print out the contents of the folder that our notebook is in. The %ls command lists the contents of the current folder.
End of explanation
"""
steel_df = pd.read_excel("steel1045.xls")
al_df = pd.read_excel("aluminum6061.xls")
"""
Explanation: We can see our Jupyter notebook stress_strain_curve_with_python.ipynb as well as the two .xls data files aluminum6061.xls and steel1045.xls are in our current folder.
Now that we are sure the two .xls data files are in the same folder as our notebook, we can import the data in the two two .xls files using Panda's pd.read_excel() function. The data from the two excel files will be stored in two Pandas dataframes called steel_df and al_df.
End of explanation
"""
steel_df.head()
al_df.head()
"""
Explanation: We can use Pandas .head() method to view the first five rows of each dataframe.
End of explanation
"""
# Convert laser-extensometer strain from percent to in/in (x 0.01) and
# force from lb to kip (x 0.001); engineering stress (ksi) = F / A0 with
# A0 = pi * (d/2)^2 for a round test bar of diameter d = 0.506 in.
strain_steel = steel_df['CH5']*0.01
d_steel = 0.506 # test bar diameter = 0.506 inches
stress_steel = (steel_df['FORCE']*0.001)/(np.pi*((d_steel/2)**2))
strain_al = al_df['CH5']*0.01
d_al = 0.506 # test bar diameter = 0.506 inches
stress_al = (al_df['FORCE']*0.001)/(np.pi*((d_al/2)**2))
"""
Explanation: We see a number of columns in each dataframe. The columns we are interested in are FORCE, EXT, and CH5. Below is a description of what these columns mean.
FORCE Force measurements from the load cell in pounds (lb), force in pounds
EXT Extension measurements from the mechanical extensometer in percent (%), strain in percent
CH5 Extension readings from the laser extensometer in percent (%), strain in percent
Create stress and strain series from the FORCE, EXT, and CH5 columns
Next we'll create a four Pandas series from the ['CH5'] and ['FORCE'] columns of our al_df and steel_df dataframes. The equations below show how to calculate stress, $\sigma$, and strain, $\epsilon$, from force $F$ and cross-sectional area $A$. Cross-sectional area $A$ is the formula for the area of a circle. For the steel and aluminum samples we tested, the diameter $d$ was $0.506 \ in$.
$$ \sigma = \frac{F}{A_0} $$
$$ F \ (kip) = F \ (lb) \times 0.001 $$
$$ A_0 = \pi (d/2)^2 $$
$$ d = 0.506 \ in $$
$$ \epsilon \ (unitless) = \epsilon \ (\%) \times 0.01 $$
End of explanation
"""
plt.plot(strain_steel,stress_steel,strain_al,stress_al)
plt.show()
"""
Explanation: Build a quick plot
Now that we have the data from the tensile test in four series, we can build a quick plot using Matplotlib's plt.plot() method. The first x,y pair we pass to plt.plot() is strain_steel,stress_steel and the second x,y pair we pass in is strain_al,stress_al. The command plt.show() shows the plot.
End of explanation
"""
plt.plot(strain_steel,stress_steel,strain_al,stress_al)
plt.xlabel('strain (in/in)')
plt.ylabel('stress (ksi)')
plt.title('Stress Strain Curve of Steel 1045 and Aluminum 6061 in tension')
plt.legend(['Steel 1045','Aluminum 6061'])
plt.show()
"""
Explanation: We see a plot with two lines. One line represents the steel sample and one line represents the aluminum sample. We can improve our plot by adding axis labels with units, a title and a legend.
Add axis labels, title and a legend
Axis labels, titles and a legend are added to our plot with three Matplotlib methods. The methods are summarized in the table below.
| Matplotlib method | description | example |
| --- | --- | --- |
| plt.xlabel() | x-axis label | plt.xlabel('strain (in/in)') |
| plt.ylabel() | y-axis label | plt.ylabel('stress (ksi)') |
| plt.title() | plot title | plt.title('Stress Strain Curve') |
| plt.legend() | legend | plt.legend(['steel','aluminum']) |
The code cell below shows these four methods in action and produces a plot.
End of explanation
"""
plt.plot(strain_steel,stress_steel,strain_al,stress_al)
plt.xlabel('strain (in/in)')
plt.ylabel('stress (ksi)')
plt.title('Stress Strain Curve of Steel 1045 and Aluminum 6061 in tension')
plt.legend(['Steel 1045','Aluminum 6061'])
plt.savefig('stress-strain_curve.png', dpi=300, bbox_inches='tight')
plt.show()
"""
Explanation: The plot we see has two lines, axis labels, a title and a legend. Next we'll save the plot to a .png image file.
Save the plot as a .png image
Now we can save the plot as a .png image using Matplotlib's plt.savefig() method. The code cell below builds the plot and saves an image file called stress-strain_curve.png. The argument dpi=300 inside of Matplotlib's plt.savefig() method specifies the resolution of our saved image. The image stress-strain_curve.png will be saved in the same folder as our running Jupyter notebook.
End of explanation
"""
|
tensorflow/tensorrt | tftrt/examples/image_classification/NGC-TFv2-TF-TRT-inference-from-Keras-saved-model.ipynb | apache-2.0 | # Copyright 2019 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Explanation: <a href="https://colab.research.google.com/github/tensorflow/tensorrt/blob/r2.0/tftrt/examples/image_classification/TFv2-TF-TRT-inference-from-Keras-saved-model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
End of explanation
"""
!nvidia-smi
"""
Explanation: <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
TF-TRT Inference from Keras Model with TensorFlow 2.0
Introduction
The NVIDIA TensorRT is a C++ library that facilitates high performance inference on NVIDIA graphics processing units (GPUs). TensorRT takes a trained network, which consists of a network definition and a set of trained parameters, and produces a highly optimized runtime engine which performs inference for that network.
TensorFlow™ integration with TensorRT™ (TF-TRT) optimizes and executes compatible subgraphs, allowing TensorFlow to execute the remaining graph. While you can still use TensorFlow's wide and flexible feature set, TensorRT will parse the model and apply optimizations to the portions of the graph wherever possible.
In this notebook, we demonstrate the process of optimizing a ResNet-50 model with a TF-TRT.
Requirement
GPU
This demo will work on any NVIDIA GPU with CUDA cores, though for improved FP16 and INT8 inference, a Volta, Turing, Ampere or newer generation GPU with Tensor cores is desired.
End of explanation
"""
!pip install pillow matplotlib
"""
Explanation: Install Dependencies
End of explanation
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.saved_model import tag_constants
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
print("Tensorflow version: ", tf.version.VERSION)
# check TensorRT version
print("TensorRT version: ")
!dpkg -l | grep nvinfer
"""
Explanation: Importing required libraries
End of explanation
"""
from tensorflow.python.client import device_lib
def check_tensor_core_gpu_present():
    """Return True if a visible GPU has compute capability >= 7.0 (Tensor Cores).

    Scans the devices reported by TensorFlow's device_lib and parses the
    "compute capability: X.Y" suffix of each GPU's physical_device_desc.
    Returns False (not None) when no such GPU is found, so the printed
    result is always a proper boolean.
    """
    local_device_protos = device_lib.list_local_devices()
    for line in local_device_protos:
        if "compute capability" in str(line):
            compute_capability = float(line.physical_device_desc.split("compute capability: ")[-1])
            if compute_capability >= 7.0:
                return True
    # Previously the function fell through and implicitly returned None.
    return False
tensor_core_gpu = check_tensor_core_gpu_present()
"""
Explanation: A successful TensorRT installation looks like:
TensorRT version:
ii libnvinfer5 5.1.5-1+cuda10.1 amd64 TensorRT runtime libraries
Check Tensor core GPU
The below code check whether a Tensor-core GPU is present.
End of explanation
"""
!mkdir ./data
!wget -O ./data/img0.JPG "https://d17fnq9dkz9hgj.cloudfront.net/breed-uploads/2018/08/siberian-husky-detail.jpg?bust=1535566590&width=630"
!wget -O ./data/img1.JPG "https://www.hakaimagazine.com/wp-content/uploads/header-gulf-birds.jpg"
!wget -O ./data/img2.JPG "https://www.artis.nl/media/filer_public_thumbnails/filer_public/00/f1/00f1b6db-fbed-4fef-9ab0-84e944ff11f8/chimpansee_amber_r_1920x1080.jpg__1920x1080_q85_subject_location-923%2C365_subsampling-2.jpg"
!wget -O ./data/img3.JPG "https://www.familyhandyman.com/wp-content/uploads/2018/09/How-to-Avoid-Snakes-Slithering-Up-Your-Toilet-shutterstock_780480850.jpg"
from tensorflow.keras.preprocessing import image
fig, axes = plt.subplots(nrows=2, ncols=2)
for i in range(4):
img_path = './data/img%d.JPG'%i
img = image.load_img(img_path, target_size=(224, 224))
plt.subplot(2,2,i+1)
plt.imshow(img);
plt.axis('off');
"""
Explanation: Data
We download several random images for testing from the Internet.
End of explanation
"""
model = ResNet50(weights='imagenet')
"""
Explanation: Model
We next download and test a ResNet-50 pre-trained model from the Keras model zoo.
End of explanation
"""
for i in range(4):
img_path = './data/img%d.JPG'%i
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('{} - Predicted: {}'.format(img_path, decode_predictions(preds, top=3)[0]))
plt.subplot(2,2,i+1)
plt.imshow(img);
plt.axis('off');
plt.title(decode_predictions(preds, top=3)[0][0][1])
# Save the entire model as a SavedModel.
model.save('resnet50_saved_model')
!saved_model_cli show --all --dir resnet50_saved_model
"""
Explanation: Before proceeding, let's quick take a look at the predictions
End of explanation
"""
model = tf.keras.models.load_model('resnet50_saved_model')
batch_size = 8
batched_input = np.zeros((batch_size, 224, 224, 3), dtype=np.float32)
for i in range(batch_size):
img_path = './data/img%d.JPG' % (i % 4)
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
batched_input[i, :] = x
batched_input = tf.constant(batched_input)
print('batched_input shape: ', batched_input.shape)
# Benchmarking throughput
N_warmup_run = 50
N_run = 1000
elapsed_time = []
for i in range(N_warmup_run):
preds = model.predict(batched_input)
for i in range(N_run):
start_time = time.time()
preds = model.predict(batched_input)
end_time = time.time()
elapsed_time = np.append(elapsed_time, end_time - start_time)
if i % 50 == 0:
print('Step {}: {:4.1f}ms'.format(i, (elapsed_time[-50:].mean()) * 1000))
print('Throughput: {:.0f} images/s'.format(N_run * batch_size / elapsed_time.sum()))
"""
Explanation: TF-TRT takes a TensorFlow SavedModel as input; therefore, we re-export the Keras model as a TF SavedModel.
Benchmarking Inference with native TF2.0 saved model
End of explanation
"""
print('Converting to TF-TRT FP32...')
converter = trt.TrtGraphConverterV2(input_saved_model_dir='resnet50_saved_model',
precision_mode=trt.TrtPrecisionMode.FP32,
max_workspace_size_bytes=8000000000)
converter.convert()
converter.save(output_saved_model_dir='resnet50_saved_model_TFTRT_FP32')
print('Done Converting to TF-TRT FP32')
!saved_model_cli show --all --dir resnet50_saved_model_TFTRT_FP32
"""
Explanation: TF-TRT FP32 model
We first convert the TF native FP32 model to a TF-TRT FP32 model.
End of explanation
"""
def predict_tftrt(input_saved_model):
    """Runs prediction on a single image and shows the result.

    input_saved_model (string): Name of the input model stored in the current dir.
    Loads the (TF-TRT converted) SavedModel, runs its 'serving_default'
    signature on ./data/img0.JPG, prints the top-3 ImageNet predictions and
    displays the image with the top prediction as title.
    """
    img_path = './data/img0.JPG'  # Siberian_husky
    img = image.load_img(img_path, target_size=(224, 224))
    # Keras preprocessing: HWC array -> NHWC batch of 1 -> ResNet50 preprocessing.
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    x = tf.constant(x)
    # Load the SavedModel and grab its default inference signature.
    saved_model_loaded = tf.saved_model.load(input_saved_model, tags=[tag_constants.SERVING])
    signature_keys = list(saved_model_loaded.signatures.keys())
    print(signature_keys)
    infer = saved_model_loaded.signatures['serving_default']
    print(infer.structured_outputs)
    labeling = infer(x)
    preds = labeling['predictions'].numpy()
    print('{} - Predicted: {}'.format(img_path, decode_predictions(preds, top=3)[0]))
    plt.subplot(2,2,1)
    plt.imshow(img);
    plt.axis('off');
    plt.title(decode_predictions(preds, top=3)[0][0][1])
def benchmark_tftrt(input_saved_model):
    """Measure inference throughput (images/s) of a SavedModel.

    Uses the module-level `batched_input` tensor and `batch_size`.
    Runs N_warmup_run untimed iterations first (GPU initialization and
    TF-TRT engine building), then times N_run batches, printing a rolling
    mean latency every 50 steps and the overall throughput at the end.
    """
    saved_model_loaded = tf.saved_model.load(input_saved_model, tags=[tag_constants.SERVING])
    infer = saved_model_loaded.signatures['serving_default']
    N_warmup_run = 50
    N_run = 1000
    elapsed_time = []
    for i in range(N_warmup_run):
        labeling = infer(batched_input)
    for i in range(N_run):
        start_time = time.time()
        labeling = infer(batched_input)
        end_time = time.time()
        elapsed_time = np.append(elapsed_time, end_time - start_time)
        if i % 50 == 0:
            # Mean latency of the last 50 timed steps, reported in milliseconds.
            print('Step {}: {:4.1f}ms'.format(i, (elapsed_time[-50:].mean()) * 1000))
    print('Throughput: {:.0f} images/s'.format(N_run * batch_size / elapsed_time.sum()))
predict_tftrt('resnet50_saved_model_TFTRT_FP32')
"""
Explanation: Before proceeding, let's write a couple of utility functions to use the model for prediction and benchmarking purposes.
End of explanation
"""
benchmark_tftrt('resnet50_saved_model_TFTRT_FP32')
"""
Explanation: Next, we load and test the TF-TRT FP32 model.
End of explanation
"""
print('Converting to TF-TRT FP16...')
converter = trt.TrtGraphConverterV2(input_saved_model_dir='resnet50_saved_model',
precision_mode=trt.TrtPrecisionMode.FP16,
max_workspace_size_bytes=8000000000)
converter.convert()
converter.save(output_saved_model_dir='resnet50_saved_model_TFTRT_FP16')
print('Done Converting to TF-TRT FP16')
predict_tftrt('resnet50_saved_model_TFTRT_FP16')
benchmark_tftrt('resnet50_saved_model_TFTRT_FP16')
"""
Explanation: TF-TRT FP16 model
We next convert the native TF FP32 saved model to TF-TRT FP16 model.
End of explanation
"""
import os
os.kill(os.getpid(), 9)
"""
Explanation: TF-TRT INT8 model
Creating TF-TRT INT8 model requires a small calibration dataset. This data set ideally should represent the test data in production well, and will be used to create a value histogram for each layer in the neural network for effective 8-bit quantization.
Herein, for demonstration purposes, we take only the 4 images that we downloaded for calibration. In production, this set should be more representative of the production data.
End of explanation
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.saved_model import tag_constants
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
batch_size = 8
batched_input = np.zeros((batch_size, 224, 224, 3), dtype=np.float32)
for i in range(batch_size):
img_path = './data/img%d.JPG' % (i % 4)
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
batched_input[i, :] = x
batched_input = tf.constant(batched_input)
print('batched_input shape: ', batched_input.shape)
def calibration_input_fn():
    # Generator of calibration batches for TF-TRT INT8 conversion; each item
    # is a tuple of the model's positional inputs (here one batched tensor).
    yield (batched_input, )
print('Converting to TF-TRT INT8...')
converter = trt.TrtGraphConverterV2(input_saved_model_dir='resnet50_saved_model',
precision_mode=trt.TrtPrecisionMode.INT8,
max_workspace_size_bytes=8000000000)
converter.convert(calibration_input_fn=calibration_input_fn)
converter.save(output_saved_model_dir='resnet50_saved_model_TFTRT_INT8')
print('Done Converting to TF-TRT INT8')
def predict_tftrt(input_saved_model):
    """Runs prediction on a single image and shows the result.

    input_saved_model (string): Name of the input model stored in the current dir.
    (Re-defined after the kernel reset; identical to the earlier version.)
    """
    img_path = './data/img0.JPG'  # Siberian_husky
    img = image.load_img(img_path, target_size=(224, 224))
    # Keras preprocessing: HWC array -> NHWC batch of 1 -> ResNet50 preprocessing.
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    x = tf.constant(x)
    # Load the SavedModel and grab its default inference signature.
    saved_model_loaded = tf.saved_model.load(input_saved_model, tags=[tag_constants.SERVING])
    signature_keys = list(saved_model_loaded.signatures.keys())
    print(signature_keys)
    infer = saved_model_loaded.signatures['serving_default']
    print(infer.structured_outputs)
    labeling = infer(x)
    preds = labeling['predictions'].numpy()
    print('{} - Predicted: {}'.format(img_path, decode_predictions(preds, top=3)[0]))
    plt.subplot(2,2,1)
    plt.imshow(img);
    plt.axis('off');
    plt.title(decode_predictions(preds, top=3)[0][0][1])
def benchmark_tftrt(input_saved_model):
    """Measure inference throughput (images/s) of a SavedModel.

    Uses the module-level `batched_input` tensor and `batch_size`.
    Warms up for N_warmup_run untimed iterations (GPU initialization and
    TF-TRT engine building), then times N_run batches, printing a rolling
    mean latency every 50 steps and the overall throughput at the end.
    """
    saved_model_loaded = tf.saved_model.load(input_saved_model, tags=[tag_constants.SERVING])
    infer = saved_model_loaded.signatures['serving_default']
    N_warmup_run = 50
    N_run = 1000
    elapsed_time = []
    for i in range(N_warmup_run):
        labeling = infer(batched_input)
    for i in range(N_run):
        start_time = time.time()
        labeling = infer(batched_input)
        #prob = labeling['probs'].numpy()
        end_time = time.time()
        elapsed_time = np.append(elapsed_time, end_time - start_time)
        if i % 50 == 0:
            # Mean latency of the last 50 timed steps, reported in milliseconds.
            print('Step {}: {:4.1f}ms'.format(i, (elapsed_time[-50:].mean()) * 1000))
    print('Throughput: {:.0f} images/s'.format(N_run * batch_size / elapsed_time.sum()))
predict_tftrt('resnet50_saved_model_TFTRT_INT8')
benchmark_tftrt('resnet50_saved_model_TFTRT_INT8')
"""
Explanation: Re-adding the data loading and utility functions from above to ease the notebook flow after resetting the kernel.
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.12.1/examples/notebooks/generated/statespace_arma_0.ipynb | bsd-3-clause | %matplotlib inline
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
"""
Explanation: Autoregressive Moving Average (ARMA): Sunspots data
This notebook replicates the existing ARMA notebook using the statsmodels.tsa.statespace.SARIMAX class rather than the statsmodels.tsa.ARMA class.
End of explanation
"""
print(sm.datasets.sunspots.NOTE)
dta = sm.datasets.sunspots.load_pandas().data
dta.index = pd.Index(pd.date_range("1700", end="2009", freq="A-DEC"))
del dta["YEAR"]
dta.plot(figsize=(12,4));
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2)
arma_mod20 = sm.tsa.statespace.SARIMAX(dta, order=(2,0,0), trend='c').fit(disp=False)
print(arma_mod20.params)
arma_mod30 = sm.tsa.statespace.SARIMAX(dta, order=(3,0,0), trend='c').fit(disp=False)
print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic)
print(arma_mod30.params)
print(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic)
"""
Explanation: Sunspots Data
End of explanation
"""
sm.stats.durbin_watson(arma_mod30.resid)
fig = plt.figure(figsize=(12,4))
ax = fig.add_subplot(111)
ax = plt.plot(arma_mod30.resid)
resid = arma_mod30.resid
stats.normaltest(resid)
fig = plt.figure(figsize=(12,4))
ax = fig.add_subplot(111)
fig = qqplot(resid, line='q', ax=ax, fit=True)
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(resid, lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2)
r,q,p = sm.tsa.acf(resid, fft=True, qstat=True)
data = np.c_[range(1,41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))
"""
Explanation: Does our model obey the theory?
End of explanation
"""
predict_sunspots = arma_mod30.predict(start='1990', end='2012', dynamic=True)
fig, ax = plt.subplots(figsize=(12, 8))
dta.loc['1950':].plot(ax=ax)
predict_sunspots.plot(ax=ax, style='r');
def mean_forecast_err(y, yhat):
    """Mean forecast error: the average of (y - yhat) over the sample.

    A positive value means the forecast `yhat` under-predicts `y` on
    average; a negative value means it over-predicts.
    """
    errors = y - yhat
    return errors.mean()
mean_forecast_err(dta.SUNACTIVITY, predict_sunspots)
"""
Explanation: This indicates a lack of fit.
In-sample dynamic prediction. How good does our model do?
End of explanation
"""
|
neoscreenager/JupyterNotebookWhirlwindTourOfPython | .ipynb_checkpoints/whirlwind-checkpoint.ipynb | gpl-3.0 | # set the midpoint
midpoint = 25
# make two empty lists
lower = []; upper = []
# split the numbers into lower and upper
for i in range(50):
if (i < midpoint):
lower.append(i)
# print("i lower = ",i)
else:
upper.append(i)
# print("i upper = ",i)
print "lower:", lower
print "upper:", upper
"""
Explanation: print "hello astronomy"
End of explanation
"""
x = 1 # x is an integer
x = 'hello' # now x is a string
x = [1, 2, 3] # now x is a list
# All python variables are pointers, so if we have two variable pointing to same mutable object, then changing
# one will change other as well.
x = [1, 2, 3]
y = x
# both x and y are pointing to same object
print y
x.append(4) # append 4 to the list pointed to by x
print y # y's list is modified as well!
"""
Explanation: Note: Most widely used style guide in Python is known as PEP8, and can be found at https://www.python.org/dev/peps/pep-0008/.
Python is dynamically-typed: variable names can point to objects of any type:
End of explanation
"""
x = 'something else'
print(y) # y is unchanged because now x is pointing to other object, but the previously pointed object is not changed
#Everything is Object
x = 'A string'
type(x)
x = 89
type(x)
x = 89.25
type(x)
#converting decimal to binary
print "binary 10:", bin(10)
# OR operation
print "4 OR 10: ", bin(4 | 10)
-1 == ~0
1 in [1, 2, 3]
2 not in [1, 2, 3]
"""
Explanation: This behavior might seem confusing if you're wrongly thinking of variables as buckets that contain data. But if you're correctly thinking of variables as pointers to objects, then this behavior makes sense.
Note also that if we use "=" to assign another value to x, this will not affect the value of y – assignment is simply a change of what object the variable points to:
End of explanation
"""
2 ** 200
for i in range(2):
print(i)
i = 0
while i < 2:
print(i)
i += 1
def _fibonacci(N,a=0,b=1):
try:
if N < 0:
raise ValueError #if not raised, this program will not raise any error,insted blank vector will be returned
L = []
# a, b = 0, 1 #getting a and b as default arguments
while len(L) < N:
a, b = b, a + b
L.append(a)
return L
except ValueError:
print "ValueError : N must be non-negative"
_fibonacci(10)
_fibonacci(-10)
def catch_all(*args, **kwargs):
    """Demonstrate flexible argument capture.

    Prints the positional arguments as a tuple and the keyword
    arguments as a dict, exactly as received.
    """
    print(f"args = {args}")
    print(f"kwargs =  {kwargs}")
catch_all(1, 2, 3, a=4, b=5)
data = [{'first':'Guido', 'last':'Van Rossum', 'YOB':1956},
{'first':'Grace', 'last':'Hopper', 'YOB':1906},
{'first':'Alan', 'last':'Turing', 'YOB':1912}]
# sort alphabetically by first name
sorted(data, key=lambda item: item['first'])
def safe_divide(a, b):
    """Divide a by b, trapping the two common failure modes.

    Returns a / b on success, the sentinel 1E100 when b is zero, and
    None after reporting a TypeError for incompatible operand types.
    """
    try:
        return a / b
    except ZeroDivisionError:
        # Messages kept verbatim (including the original spelling) so the
        # notebook output is unchanged; converted to the print() function
        # because the Python 2 print statement is a Python 3 syntax error.
        print("ZeroDivisionError occoured and caught")
        return 1E100
    except TypeError:
        print("TypeError occoured and caught")
        return None  # explicit: the original fell through and returned None
safe_divide(1, 2) #no error
safe_divide(2, 0) #ZeroDivisionError
safe_divide(2,'str') #TypeError
"""
Explanation: Python integers are variable-precision, so you can do computations that would overflow in other languages:
End of explanation
"""
|
cehbrecht/demo-notebooks | esgf-opendap.ipynb | apache-2.0 | from pyesgf.search import SearchConnection
conn = SearchConnection('http://esgf-data.dkrz.de/esg-search', distrib=False)
"""
Explanation: Prepare esgf search connection
See also http://esgf-pyclient.readthedocs.io/en/latest/examples.html
End of explanation
"""
ctx = conn.new_context(project='CORDEX', query='temperature')
print 'Hits:', ctx.hit_count
print 'Institute:'
ctx.facet_counts['institute']
"""
Explanation: run a query for CORDEX datasets and show facets
End of explanation
"""
ctx = conn.new_context(project='CORDEX', institute='MPI-CSC', experiment='historical', time_frequency='day')
print 'Hits:', ctx.hit_count
print 'Domain:', ctx.facet_counts['domain']
print 'Ensembles:', ctx.facet_counts['ensemble']
print 'Variable:', ctx.facet_counts['variable']
"""
Explanation: use facet search ...
End of explanation
"""
ctx = ctx.constrain(domain='EUR-11', ensemble='r1i1p1', variable='tasmax')
print 'Hits:', ctx.hit_count
"""
Explanation: add more constraints to select a single dataset
End of explanation
"""
result = ctx.search()[0]
agg_ctx = result.aggregation_context()
agg = agg_ctx.search()[0]
print agg.opendap_url
"""
Explanation: use aggregation context and get opendap url
End of explanation
"""
from pyesgf.logon import LogonManager
lm = LogonManager()
lm.logoff()
lm.is_logged_on()
"""
Explanation: use logon manager to get proxy certificate and prepare secured opendap access
End of explanation
"""
lm.logon(hostname="esgf-data.dkrz.de", bootstrap=True, interactive=True)
lm.is_logged_on()
"""
Explanation: ... log on now with your username and password:
End of explanation
"""
from netCDF4 import Dataset
ds = Dataset(agg.opendap_url, 'r')
"""
Explanation: this updates you proxy certificate in ~/.esg/ and prepares a ~/.dodsrc config file for secured opendap access.
use python netCDF4 to access opendap data
see http://unidata.github.io/netcdf4-python/
End of explanation
"""
ds.ncattrs()
ds.getncattr('experiment')
"""
Explanation: show attributes:
End of explanation
"""
ds.variables.keys()
"""
Explanation: show variables:
End of explanation
"""
ds.variables['time']
"""
Explanation: details about time variable:
End of explanation
"""
ds.variables['tasmax']
"""
Explanation: show details about tasmax:
End of explanation
"""
import matplotlib.pyplot as plt
from cartopy import config
import cartopy.crs as ccrs
timestep = 0
tasmax = ds.variables['tasmax'][timestep, :, :]
lats = ds.variables['lat'][:]
lons = ds.variables['lon'][:]
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
ax.set_global()
fig = plt.contourf(lons, lats, tasmax, 60, transform=ccrs.PlateCarree())
#plt.show()
"""
Explanation: Create a plot of the first timestep using cartopy
see http://scitools.org.uk/cartopy/docs/latest/matplotlib/advanced_plotting.html?highlight=netcdf
End of explanation
"""
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
ax.set_global()
plt.contourf(lons.clip(min=0, max=20), lats.clip(min=40, max=60), tasmax, 60, transform=ccrs.PlateCarree())
#plt.show()
"""
Explanation: Clip the bounding box and plot it ...
End of explanation
"""
from nco import Nco
nco = Nco()
"""
Explanation: Write netcdf file with a given subset (boundingbox, time)
One needs to create a new Dataset object and copy all the attributes and data needed. This is a bit lengthly.
See the following example:
http://schubert.atmos.colostate.edu/~cslocum/netcdf_example.html
Use nco to write netcdf file for a subset
ncks -O -d time,0,0 http://esgf1.dkrz.de/thredds/dodsC/cordex.output.EUR-11.MPI-CSC.MPI-M-MPI-ESM-LR.historical.r1i1p1.REMO2009.v1.day.tasmax.tasmax.20160419.aggregation -o test.nc
Use nco python wrapper
End of explanation
"""
nc_out = '/tmp/out.nc'
nco.ncks(input=str(agg.opendap_url), output=nc_out, options="-O -d time,0,0 -d rlon,0,20 -d rlat,40,60 --netcdf4")
"""
Explanation: subset first timestamp
End of explanation
"""
from cdo import Cdo
cdo = Cdo()
cdo.sinfo(input=nc_out)
cdo.showname(input=nc_out)
"""
Explanation: show output with cdo
End of explanation
"""
|
liganega/Gongsu-DataSci | previous/y2017/W10-stats-correlation/.ipynb_checkpoints/GongSu22_Statistics_Population_Variance-checkpoint.ipynb | gpl-3.0 | from GongSu21_Statistics_Averages import *
"""
Explanation: 자료 안내: 여기서 다루는 내용은 아래 사이트의 내용을 참고하여 생성되었음.
https://github.com/rouseguy/intro2stats
모집단 분산 점추정
안내사항
지난 시간에 다룬 21장 내용을 활용하고자 한다.
따라서 아래와 같이 21장 내용을 모듈로 담고 있는 파이썬 파일을 임포트 해야 한다.
주의: GongSu21_Statistics_Averages.py 파일이 동일한 디렉토리에 있어야 한다.
End of explanation
"""
prices_pd.head()
"""
Explanation: 주요 내용
모집단과 표본
모집단 분산의 점추정
주요 예제
21장에서 다룬 미국의 51개 주에서 거래되는 담배(식물)의 도매가격 데이터를 보다 상세히 분석한다.
특히, 캘리포니아 주를 예제로 하여 주(State)별로 담배(식물) 도매가 전체에 대한 거래가의 평균과 분산을 점추정(point estimation)하는 방법을 다룬다.
주요 모듈
pandas: 통계분석 전용 모듈
numpy 모듈을 바탕으로 하여 통계분석에 특화된 모듈임.
마이크로소프트의 엑셀처럼 작동하는 기능을 지원함
datetime: 날짜와 시간을 적절하게 표시하도록 도와주는 기능을 지원하는 모듈
scipy: 수치계산, 공업수학 등을 지원하는 모듈
주의: 언급된 모듈은 이미 GongSu21_Statistics_Averages.py 모듈에서 임포트 되었음.
오늘 사용할 데이터
주별 담배(식물) 도매가격 및 판매일자: Weed_Price.csv
아래 그림은 미국의 주별 담배(식물) 판매 데이터를 담은 Weed_Price.csv 파일를 엑셀로 읽었을 때의 일부를 보여준다.
<p>
<table cellspacing="20">
<tr>
<td>
<img src="img/weed_price.png", width=600>
</td>
</tr>
</table>
</p>
주의: 언급된 파일이 GongSu21_Statistics_Averages 모듈에서 prices_pd 라는 변수에 저장되었음.
또한 주(State)별, 거래날짜별(date) 기준으로 이미 정렬되어 있음.
따라서 아래에서 볼 수 있듯이 예를 들어, prices_pd의 첫 다섯 줄의 내용은 알파벳순으로 가장 빠른 이름을 가진 알라바마(Alabama) 주에서 거래된 데이터 중에서 가정 먼저 거래된 5개의 거래내용을 담고 있다.
End of explanation
"""
california_pd['HighQ_dev'] = (california_pd['HighQ'] - ca_mean) ** 2
california_pd.head()
"""
Explanation: 모집단과 표본
Weed_Price.csv 파일에 담긴 담배(식물) 도매가는 미국에서 거래된 모든 도매가 정보가 아니라 소수의 거래 정보만을 담고 있다.
이와같이 조사대상의 소수만을 모아 둔 데이터를 표본(Sample)이라 부른다.
반면에 미국에서 거래되는 모든 담배(식물) 도매가 전체는 현재 조사하고자 하는 대상들의 모집단이라 부른다.
여기서는 Weed_Price.csv 파일에 담긴 표본을 이용하여 모집단에 대한 분산과, 주별로 이루어진 거래 사이의 상관관계를 확인하고자 한다.
참고: 모집단과 표본, 점추정에 대한 보다 자세한 설명은 아래의 두 파일을 참조한다.
* GongSu22_Statistics_Sampling_a.pdf
* GongSu22_Statistics_Sampling_b.pdf
모집단 평균값과 분산의 점추정
모집단의 평균값 점추정: 표본의 평균값을 그대로 이용한다.
$$\hat{x} = \bar x = \frac{\Sigma_{i=1}^{n} x_i}{n}$$
$\hat x\,\,$는 모집단 평균값의 점추정 기호
$\bar x$는 표본 데이터들의 평균값 기호
모집단의 분산 점추정: 표본 데이터를 이용해서 모집단의 분산을 추정할 수 있다.
$$\hat\sigma\,\, {}^2 = s^2 = \frac{\Sigma_{i = 1}^{n}(x_i - \bar x)^2}{n-1}$$
$\hat \sigma\,\, {}^2$는 모집단 분산의 점추정 기호
주의:
* $s^2$을 계산할 때 $n$ 대신에 $n-1$로 나누는 것에 주의한다.
* 모집단의 분산은 일반적으로 표본의 분산보다 좀 더 크기 때문이다.
캘리포니아 주에서 거래된 HighQ 담배(식물)의 도매가 전체에 대한 분산의 점추정
먼저 prices_pd에 포함된 데이터 중에서 캘리포니아 주에서 거래된 상품(HighQ) 담배(식물)의 가격들에 대한 연산이 필요하다.
즉, 아래 공식의 분자를 계산하기 위한 준비과정이다.
$$s^2 = \frac{\Sigma_{i = 1}^{n}(x_i - \bar x)^2}{n-1}$$
주의: 캘리포니아 주에서 거래된 상품(HighQ) 담배(식물)의 도매가의 평균값은 ca_mean으로 이미 계산되었다.
End of explanation
"""
ca_HighQ_variance = california_pd.HighQ_dev.sum() / (ca_count - 1)
ca_HighQ_variance
"""
Explanation: 이제 캘리포니아 주 거래된 상품(HighQ) 담배(식물)의 거래가 전체 모집단에 대한 분산 점추정을 계산할 수 있다.
주의: 표본의 크기는 ca_count이다.
End of explanation
"""
# 캘리포니아에서 거래된 상품(HighQ) 담배(식물) 도매가의 표준편차
ca_HighQ_SD = np.sqrt(ca_HighQ_variance)
ca_HighQ_SD
"""
Explanation: 주의:
* DataFrame 자료형의 연산은 넘파이 어레이의 연산처럼 항목별로 실행된다.
* sum 메소드의 활용을 기억한다.
표준편차의 점추정
모집단 분산의 점추정으로 얻은 값에다가 루트를 씌우면 된다.
End of explanation
"""
|
robertoalotufo/ia898 | master/tutorial_numpy_1_8.ipynb | mit | import numpy as np
a = np.array([0, 1, 2])
print('a = \n', a)
print()
print('Resultado da operação np.tile(a,2): \n',np.tile(a,2))
"""
Explanation: Table of Contents
<p><div class="lev1 toc-item"><a href="#Tile" data-toc-modified-id="Tile-1"><span class="toc-item-num">1 </span>Tile</a></div><div class="lev2 toc-item"><a href="#Exemplo-unidimensional---replicando-as-colunas" data-toc-modified-id="Exemplo-unidimensional---replicando-as-colunas-11"><span class="toc-item-num">1.1 </span>Exemplo unidimensional - replicando as colunas</a></div><div class="lev2 toc-item"><a href="#Exemplo-unidimensional---replicando-as-linhas" data-toc-modified-id="Exemplo-unidimensional---replicando-as-linhas-12"><span class="toc-item-num">1.2 </span>Exemplo unidimensional - replicando as linhas</a></div><div class="lev2 toc-item"><a href="#Exemplo-bidimensional---replicando-as-colunas" data-toc-modified-id="Exemplo-bidimensional---replicando-as-colunas-13"><span class="toc-item-num">1.3 </span>Exemplo bidimensional - replicando as colunas</a></div><div class="lev2 toc-item"><a href="#Exemplo-bidimensional---replicando-as-linhas" data-toc-modified-id="Exemplo-bidimensional---replicando-as-linhas-14"><span class="toc-item-num">1.4 </span>Exemplo bidimensional - replicando as linhas</a></div><div class="lev2 toc-item"><a href="#Exemplo-bidimensional---replicando-as-linhas-e-colunas-simultaneamente" data-toc-modified-id="Exemplo-bidimensional---replicando-as-linhas-e-colunas-simultaneamente-15"><span class="toc-item-num">1.5 </span>Exemplo bidimensional - replicando as linhas e colunas simultaneamente</a></div><div class="lev1 toc-item"><a href="#Documentação-Oficial-Numpy" data-toc-modified-id="Documentação-Oficial-Numpy-2"><span class="toc-item-num">2 </span>Documentação Oficial Numpy</a></div>
# Tile
Uma função importante da biblioteca numpy é a tile, que gera repetições do array passado com parâmetro. A quantidade de repetições é dada pelo parâmetro reps
## Exemplo unidimensional - replicando as colunas
End of explanation
"""
print('a = \n', a)
print()
print("Resultado da operação np.tile(a,(2,1)):\n" , np.tile(a,(2,1)))
"""
Explanation: Exemplo unidimensional - replicando as linhas
Para modificar as dimensões na quais a replicação será realizada modifica-se o parâmetro reps, passando ao invés de um int, uma tupla com as dimensões que se deseja alterar
End of explanation
"""
a = np.array([[0, 1], [2, 3]])
print('a = \n', a)
print()
print("Resultado da operação np.tile(a,2):\n", np.tile(a,2))
"""
Explanation: Exemplo bidimensional - replicando as colunas
End of explanation
"""
a = np.array([[0, 1], [2, 3]])
print('a = \n', a)
print()
print("Resultado da operação np.tile(a,(3,1)):\n", np.tile(a,(3,1)))
"""
Explanation: Exemplo bidimensional - replicando as linhas
End of explanation
"""
a = np.array([[0, 1], [2, 3]])
print('a = \n', a)
print()
print("Resultado da operação np.tile(a,(2,2)):\n", np.tile(a,(2,2)))
"""
Explanation: Exemplo bidimensional - replicando as linhas e colunas simultaneamente
End of explanation
"""
|
jmhsi/justin_tinker | data_science/courses/temp/courses/ml1/lesson5-nlp.ipynb | apache-2.0 | PATH='data/aclImdb/'
names = ['neg','pos']
%ls {PATH}
%ls {PATH}train
%ls {PATH}train/pos | head
trn,trn_y = texts_from_folders(f'{PATH}train',names)
val,val_y = texts_from_folders(f'{PATH}test',names)
"""
Explanation: IMDB dataset and the sentiment classification task
The large movie review dataset contains a collection of 50,000 reviews from IMDB. The dataset contains an even number of positive and negative reviews. The authors considered only highly polarized reviews. A negative review has a score ≤ 4 out of 10, and a positive review has a score ≥ 7 out of 10. Neutral reviews are not included in the dataset. The dataset is divided into training and test sets of 25,000 labeled reviews each.
The sentiment classification task consists of predicting the polarity (positive or negative) of a given text.
To get the dataset, in your terminal run the following commands:
wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
gunzip aclImdb_v1.tar.gz
tar -xvf aclImdb_v1.tar
Tokenizing and term document matrix creation
End of explanation
"""
trn[0]
trn_y[0]
"""
Explanation: Here is the text of the first review
End of explanation
"""
veczr = CountVectorizer(tokenizer=tokenize)
"""
Explanation: CountVectorizer converts a collection of text documents to a matrix of token counts (part of sklearn.feature_extraction.text).
End of explanation
"""
trn_term_doc = veczr.fit_transform(trn)
val_term_doc = veczr.transform(val)
trn_term_doc
trn_term_doc[0]
vocab = veczr.get_feature_names(); vocab[5000:5005]
w0 = set([o.lower() for o in trn[0].split(' ')]); w0
len(w0)
veczr.vocabulary_['absurd']
trn_term_doc[0,1297]
trn_term_doc[0,5000]
"""
Explanation: fit_transform(trn) finds the vocabulary in the training set. It also transforms the training set into a term-document matrix. Since we have to apply the same transformation to your validation set, the second line uses just the method transform(val). trn_term_doc and val_term_doc are sparse matrices. trn_term_doc[i] represents training document i and it contains a count of words for each document for each word in the vocabulary.
End of explanation
"""
def pr(y_i):
    """Smoothed per-feature count ratio for class `y_i`.

    Uses the module-level term-document matrix `x` and label vector `y`:
    sums the feature counts over the documents labelled `y_i` and applies
    add-one (Laplace) smoothing to both numerator and denominator.
    """
    mask = (y == y_i)
    feature_counts = x[mask].sum(0)
    return (feature_counts + 1) / (mask.sum() + 1)
x=trn_term_doc
y=trn_y
r = np.log(pr(1)/pr(0))
b = np.log((y==1).mean() / (y==0).mean())
"""
Explanation: Naive Bayes
We define the log-count ratio $r$ for each word $f$:
$r = \log \frac{\text{ratio of feature $f$ in positive documents}}{\text{ratio of feature $f$ in negative documents}}$
where ratio of feature $f$ in positive documents is the number of times a positive document has a feature divided by the number of positive documents.
End of explanation
"""
pre_preds = val_term_doc @ r.T + b
preds = pre_preds.T>0
(preds==val_y).mean()
"""
Explanation: Here is the formula for Naive Bayes.
End of explanation
"""
x=trn_term_doc.sign()
r = np.log(pr(1)/pr(0))
pre_preds = val_term_doc.sign() @ r.T + b
preds = pre_preds.T>0
(preds==val_y).mean()
"""
Explanation: ...and binarized Naive Bayes.
End of explanation
"""
m = LogisticRegression(C=1e8, dual=True)
m.fit(x, y)
preds = m.predict(val_term_doc)
(preds==val_y).mean()
m = LogisticRegression(C=1e8, dual=True)
m.fit(trn_term_doc.sign(), y)
preds = m.predict(val_term_doc.sign())
(preds==val_y).mean()
"""
Explanation: Logistic regression
Here is how we can fit logistic regression where the features are the unigrams.
End of explanation
"""
m = LogisticRegression(C=0.1, dual=True)
m.fit(x, y)
preds = m.predict(val_term_doc)
(preds==val_y).mean()
m = LogisticRegression(C=0.1, dual=True)
m.fit(trn_term_doc.sign(), y)
preds = m.predict(val_term_doc.sign())
(preds==val_y).mean()
"""
Explanation: ...and the regularized version
End of explanation
"""
veczr = CountVectorizer(ngram_range=(1,3), tokenizer=tokenize, max_features=800000)
trn_term_doc = veczr.fit_transform(trn)
val_term_doc = veczr.transform(val)
trn_term_doc.shape
vocab = veczr.get_feature_names()
vocab[200000:200005]
y=trn_y
x=trn_term_doc.sign()
val_x = val_term_doc.sign()
r = np.log(pr(1) / pr(0))
b = np.log((y==1).mean() / (y==0).mean())
"""
Explanation: Trigram with NB features
Our next model is a version of logistic regression with Naive Bayes features described here. For every document we compute binarized features as described above, but this time we use bigrams and trigrams too. Each feature is a log-count ratio. A logistic regression model is then trained to predict sentiment.
End of explanation
"""
m = LogisticRegression(C=0.1, dual=True)
m.fit(x, y);
preds = m.predict(val_x)
(preds.T==val_y).mean()
"""
Explanation: Here we fit regularized logistic regression where the features are the trigrams.
End of explanation
"""
r.shape, r
np.exp(r)
"""
Explanation: Here is the $\text{log-count ratio}$ r.
End of explanation
"""
x_nb = x.multiply(r)
m = LogisticRegression(dual=True, C=0.1)
m.fit(x_nb, y);
val_x_nb = val_x.multiply(r)
preds = m.predict(val_x_nb)
(preds.T==val_y).mean()
"""
Explanation: Here we fit regularized logistic regression where the features are the trigrams' log-count ratios.
End of explanation
"""
sl=2000
# Here is how we get a model from a bag of words
md = TextClassifierData.from_bow(trn_term_doc, trn_y, val_term_doc, val_y, sl)
learner = md.dotprod_nb_learner()
learner.fit(0.02, 1, wds=1e-6, cycle_len=1)
learner.fit(0.02, 2, wds=1e-6, cycle_len=1)
learner.fit(0.02, 2, wds=1e-6, cycle_len=1)
"""
Explanation: fastai NBSVM++
End of explanation
"""
|
maxis42/ML-DA-Coursera-Yandex-MIPT | 4 Stats for data analysis/Homework/15 project genom cancer/Genom cancer.ipynb | mit | from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
gen = pd.read_csv('gene_high_throughput_sequencing.csv')
gen.head()
types, cnts = np.unique(gen.Diagnosis.values, return_counts=True)
_ = sns.barplot(types, cnts)
_ = plt.xlabel('Diagnosis')
_ = plt.ylabel('Count')
"""
Explanation: Обнаружение статистически значимых отличий в уровнях экспрессии генов больных раком
Данные для этой задачи взяты из исследования, проведённого в Stanford School of Medicine. В исследовании была предпринята попытка выявить набор генов, которые позволили бы более точно диагностировать возникновение рака груди на самых ранних стадиях.
В эксперименте принимали участие 24 человек, у которых не было рака груди (normal), 25 человек, у которых это заболевание было диагностировано на ранней стадии (early neoplasia), и 23 человека с сильно выраженными симптомами (cancer).
End of explanation
"""
#Diagnosis types
types
#Split data by groups
gen_normal = gen.loc[gen.Diagnosis == 'normal']
gen_neoplasia = gen.loc[gen.Diagnosis == 'early neoplasia']
gen_cancer = gen.loc[gen.Diagnosis == 'cancer']
"""
Explanation: Ученые провели секвенирование биологического материала испытуемых, чтобы понять, какие из этих генов наиболее активны в клетках больных людей.
Секвенирование — это определение степени активности генов в анализируемом образце с помощью подсчёта количества соответствующей каждому гену РНК.
В данных для этого задания представлена именно эта количественная мера активности каждого из 15748 генов у каждого из 72 человек, принимавших участие в эксперименте.
Нужно будет определить те гены, активность которых у людей в разных стадиях заболевания отличается статистически значимо.
Кроме того, нужно будет оценить не только статистическую, но и практическую значимость этих результатов, которая часто используется в подобных исследованиях.
Диагноз человека содержится в столбце под названием "Diagnosis".
Практическая значимость изменения
Цель исследований — найти гены, средняя экспрессия которых отличается не только статистически значимо, но и достаточно сильно. В экспрессионных исследованиях для этого часто используется метрика, которая называется fold change (кратность изменения). Определяется она следующим образом:
Fc(C,T)=T/C при T≥C и -C/T при T<C,
где C,T — средние значения экспрессии гена в control и treatment группах соответственно. По сути, fold change показывает, во сколько раз отличаются средние двух выборок.
Часть 1: применение t-критерия Стьюдента
В первой части нужно применить критерий Стьюдента для проверки гипотезы о равенстве средних в двух независимых выборках. Применить критерий для каждого гена нужно будет дважды:
для групп normal (control) и early neoplasia (treatment)
для групп early neoplasia (control) и cancer (treatment)
В качестве ответа в этой части задания необходимо указать количество статистически значимых отличий, которые мы нашли с помощью t-критерия Стьюдента, то есть число генов, у которых p-value этого теста оказался меньше, чем уровень значимости.
End of explanation
"""
#Shapiro-Wilk test for samples
print('Shapiro-Wilk test for samples')
sw_normal = gen_normal.iloc[:,2:].apply(stats.shapiro, axis=0)
sw_normal_p = [p for _, p in sw_normal]
_, sw_normal_p_corr, _, _ = multipletests(sw_normal_p, method='fdr_bh')
sw_neoplasia = gen_neoplasia.iloc[:,2:].apply(stats.shapiro, axis=0)
sw_neoplasia_p = [p for _, p in sw_neoplasia]
_, sw_neoplasia_p_corr, _, _ = multipletests(sw_neoplasia_p, method='fdr_bh')
sw_cancer = gen_cancer.iloc[:,2:].apply(stats.shapiro, axis=0)
sw_cancer_p = [p for _, p in sw_cancer]
_, sw_cancer_p_corr, _, _ = multipletests(sw_cancer_p, method='fdr_bh')
print('Mean corrected p-value for "normal": %.4f' % sw_normal_p_corr.mean())
print('Mean corrected p-value for "early neoplasia": %.4f' % sw_neoplasia_p_corr.mean())
print('Mean corrected p-value for "cancer": %.4f' % sw_cancer_p_corr.mean())
"""
Explanation: Для того, чтобы использовать двухвыборочный критерий Стьюдента, убедимся, что распределения в выборках существенно не отличаются от нормальных, применив критерий Шапиро-Уилка.
End of explanation
"""
tt_ind_normal_neoplasia = stats.ttest_ind(gen_normal.iloc[:,2:], gen_neoplasia.iloc[:,2:], equal_var = False)
tt_ind_normal_neoplasia_p = tt_ind_normal_neoplasia[1]
tt_ind_neoplasia_cancer = stats.ttest_ind(gen_neoplasia.iloc[:,2:], gen_cancer.iloc[:,2:], equal_var = False)
tt_ind_neoplasia_cancer_p = tt_ind_neoplasia_cancer[1]
tt_ind_normal_neoplasia_p_5 = tt_ind_normal_neoplasia_p[np.where(tt_ind_normal_neoplasia_p < 0.05)].shape[0]
tt_ind_neoplasia_cancer_p_5 = tt_ind_neoplasia_cancer_p[np.where(tt_ind_neoplasia_cancer_p < 0.05)].shape[0]
print('Normal vs neoplasia samples p-values number below 0.05: %d' % tt_ind_normal_neoplasia_p_5)
print('Neoplasia vs cancer samples p-values number below 0.05: %d' % tt_ind_neoplasia_cancer_p_5)
with open('answer1.txt', 'w') as fout:
fout.write(str(tt_ind_normal_neoplasia_p_5))
with open('answer2.txt', 'w') as fout:
fout.write(str(tt_ind_neoplasia_cancer_p_5))
"""
Explanation: Так как среднее значение p-value >> 0.05, то будем применять критерий Стьюдента.
End of explanation
"""
#Holm correction
_, tt_ind_normal_neoplasia_p_corr, _, _ = multipletests(tt_ind_normal_neoplasia_p, method='holm')
_, tt_ind_neoplasia_cancer_p_corr, _, _ = multipletests(tt_ind_neoplasia_cancer_p, method='holm')
#Bonferroni correction
p_corr = np.array([tt_ind_normal_neoplasia_p_corr, tt_ind_neoplasia_cancer_p_corr])
_, p_corr_bonf, _, _ = multipletests(p_corr, is_sorted=True, method='bonferroni')
p_corr_bonf_normal_neoplasia_p_5 = p_corr_bonf[0][np.where(p_corr_bonf[0] < 0.05)].shape[0]
p_corr_bonf_neoplasia_cancer_p_5 = p_corr_bonf[1][np.where(p_corr_bonf[1] < 0.05)].shape[0]
print('Normal vs neoplasia samples p-values number below 0.05: %d' % p_corr_bonf_normal_neoplasia_p_5)
print('Neoplasia vs cancer samples p-values number below 0.05: %d' % p_corr_bonf_neoplasia_cancer_p_5)
def fold_change(C, T, limit=1.5):
    """Fold change between a control and a treatment mean.

    C - control sample mean
    T - treatment sample mean
    limit - practical-significance threshold on |fold change|

    Returns a (significant, fc_stat) pair: fc_stat is T/C when T >= C
    and -C/T otherwise, and `significant` is True when |fc_stat|
    exceeds `limit`.
    """
    fc_stat = T / C if T >= C else -C / T
    return np.abs(fc_stat) > limit, fc_stat
#Normal vs neoplasia samples
gen_p_corr_bonf_normal_p_5 = gen_normal.iloc[:,2:].iloc[:, np.where(p_corr_bonf[0] < 0.05)[0]]
gen_p_corr_bonf_neoplasia0_p_5 = gen_neoplasia.iloc[:,2:].iloc[:, np.where(p_corr_bonf[0] < 0.05)[0]]
fc_corr_bonf_normal_neoplasia_p_5 = 0
for norm, neopl in zip(gen_p_corr_bonf_normal_p_5.mean(), gen_p_corr_bonf_neoplasia0_p_5.mean()):
accept, _ = fold_change(norm, neopl)
if accept: fc_corr_bonf_normal_neoplasia_p_5 += 1
#Neoplasia vs cancer samples
gen_p_corr_bonf_neoplasia1_p_5 = gen_neoplasia.iloc[:,2:].iloc[:, np.where(p_corr_bonf[1] < 0.05)[0]]
gen_p_corr_bonf_cancer_p_5 = gen_cancer.iloc[:,2:].iloc[:, np.where(p_corr_bonf[1] < 0.05)[0]]
fc_corr_bonf_neoplasia_cancer_p_5 = 0
for neopl, canc in zip(gen_p_corr_bonf_neoplasia1_p_5.mean(), gen_p_corr_bonf_cancer_p_5.mean()):
accept, _ = fold_change(neopl, canc)
if accept: fc_corr_bonf_neoplasia_cancer_p_5 += 1
print('Normal vs neoplasia samples fold change above 1.5: %d' % fc_corr_bonf_normal_neoplasia_p_5)
print('Neoplasia vs cancer samples fold change above 1.5: %d' % fc_corr_bonf_neoplasia_cancer_p_5)
with open('answer3.txt', 'w') as fout:
fout.write(str(fc_corr_bonf_normal_neoplasia_p_5))
with open('answer4.txt', 'w') as fout:
fout.write(str(fc_corr_bonf_neoplasia_cancer_p_5))
"""
Explanation: Часть 2: поправка методом Холма
Для этой части задания нам понадобится модуль multitest из statsmodels.
В этой части задания нужно будет применить поправку Холма для получившихся двух наборов достигаемых уровней значимости из предыдущей части. Обратим внимание, что поскольку мы будем делать поправку для каждого из двух наборов p-value отдельно, то проблема, связанная с множественной проверкой останется.
Для того, чтобы ее устранить, достаточно воспользоваться поправкой Бонферрони, то есть использовать уровень значимости 0.05 / 2 вместо 0.05 для дальнейшего уточнения значений p-value c помощью метода Холма.
В качестве ответа к этому заданию требуется ввести количество значимых отличий в каждой группе после того, как произведена коррекция Холма-Бонферрони. Причем это число нужно ввести с учетом практической значимости: посчитать для каждого значимого изменения fold change и выписать в ответ число таких значимых изменений, абсолютное значение fold change которых больше, чем 1.5.
Обратим внимание, что
применять поправку на множественную проверку нужно ко всем значениям достигаемых уровней значимости, а не только для тех, которые меньше значения уровня доверия;
при использовании поправки на уровне значимости 0.025 меняются значения достигаемого уровня значимости, но не меняется значение уровня доверия (то есть для отбора значимых изменений скорректированные значения уровня значимости нужно сравнивать с порогом 0.025, а не 0.05)!
End of explanation
"""
#Benjamini-Hochberg correction
# Apply the FDR-controlling Benjamini-Hochberg correction independently to
# each comparison's raw p-values (normal vs neoplasia, neoplasia vs cancer).
_, tt_ind_normal_neoplasia_p_corr, _, _ = multipletests(tt_ind_normal_neoplasia_p, method='fdr_bh')
_, tt_ind_neoplasia_cancer_p_corr, _, _ = multipletests(tt_ind_neoplasia_cancer_p, method='fdr_bh')
#Bonferroni correction
# NOTE(review): `p_corr` is a 2-row 2-D array, while multipletests is
# documented for 1-D input; `is_sorted=True` is also passed although the
# values are not pre-sorted. Confirm this stacked call yields the intended
# per-comparison Bonferroni correction.
p_corr = np.array([tt_ind_normal_neoplasia_p_corr, tt_ind_neoplasia_cancer_p_corr])
_, p_corr_bonf, _, _ = multipletests(p_corr, is_sorted=True, method='bonferroni')
# Count corrected p-values below the 0.05 threshold for each comparison.
p_corr_bonf_normal_neoplasia_p_5 = p_corr_bonf[0][np.where(p_corr_bonf[0] < 0.05)].shape[0]
p_corr_bonf_neoplasia_cancer_p_5 = p_corr_bonf[1][np.where(p_corr_bonf[1] < 0.05)].shape[0]
print('Normal vs neoplasia samples p-values number below 0.05: %d' % p_corr_bonf_normal_neoplasia_p_5)
print('Neoplasia vs cancer samples p-values number below 0.05: %d' % p_corr_bonf_neoplasia_cancer_p_5)
#Normal vs neoplasia samples
# Keep only the gene-expression columns (cols 2+) whose corrected p-value is
# significant, then count the practically significant ones — those whose
# |fold change| exceeds 1.5, as decided by the project-level `fold_change`.
gen_p_corr_bonf_normal_p_5 = gen_normal.iloc[:,2:].iloc[:, np.where(p_corr_bonf[0] < 0.05)[0]]
gen_p_corr_bonf_neoplasia0_p_5 = gen_neoplasia.iloc[:,2:].iloc[:, np.where(p_corr_bonf[0] < 0.05)[0]]
fc_corr_bonf_normal_neoplasia_p_5 = 0
for norm, neopl in zip(gen_p_corr_bonf_normal_p_5.mean(), gen_p_corr_bonf_neoplasia0_p_5.mean()):
    accept, _ = fold_change(norm, neopl)
    if accept: fc_corr_bonf_normal_neoplasia_p_5 += 1
#Neoplasia vs cancer samples
# Same procedure for the second comparison.
gen_p_corr_bonf_neoplasia1_p_5 = gen_neoplasia.iloc[:,2:].iloc[:, np.where(p_corr_bonf[1] < 0.05)[0]]
gen_p_corr_bonf_cancer_p_5 = gen_cancer.iloc[:,2:].iloc[:, np.where(p_corr_bonf[1] < 0.05)[0]]
fc_corr_bonf_neoplasia_cancer_p_5 = 0
for neopl, canc in zip(gen_p_corr_bonf_neoplasia1_p_5.mean(), gen_p_corr_bonf_cancer_p_5.mean()):
    accept, _ = fold_change(neopl, canc)
    if accept: fc_corr_bonf_neoplasia_cancer_p_5 += 1
print('Normal vs neoplasia samples fold change above 1.5: %d' % fc_corr_bonf_normal_neoplasia_p_5)
print('Neoplasia vs cancer samples fold change above 1.5: %d' % fc_corr_bonf_neoplasia_cancer_p_5)
# Persist the two answers for submission.
with open('answer5.txt', 'w') as fout:
    fout.write(str(fc_corr_bonf_normal_neoplasia_p_5))
with open('answer6.txt', 'w') as fout:
    fout.write(str(fc_corr_bonf_neoplasia_cancer_p_5))
"""
Explanation: Часть 3: поправка методом Бенджамини-Хохберга
Данная часть задания аналогична второй части за исключением того, что нужно будет использовать метод Бенджамини-Хохберга.
Обратим внимание, что методы коррекции, которые контролируют FDR, допускает больше ошибок первого рода и имеют большую мощность, чем методы, контролирующие FWER. Большая мощность означает, что эти методы будут совершать меньше ошибок второго рода (то есть будут лучше улавливать отклонения от H0, когда они есть, и будут чаще отклонять H0, когда отличий нет).
В качестве ответа к этому заданию требуется ввести количество значимых отличий в каждой группе после того, как произведена коррекция Бенджамини-Хохберга, причем так же, как и во второй части, считать только такие отличия, у которых abs(fold change) > 1.5.
End of explanation
"""
|
tensorflow/docs-l10n | site/en-snapshot/quantum/tutorials/quantum_data.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 The TensorFlow Authors.
End of explanation
"""
!pip install tensorflow==2.7.0 tensorflow-quantum
# Update package resources to account for version changes.
import importlib, pkg_resources
importlib.reload(pkg_resources)
import cirq
import sympy
import numpy as np
import tensorflow as tf
import tensorflow_quantum as tfq
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
np.random.seed(1234)
"""
Explanation: Quantum data
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/quantum_data"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/quantum_data.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/quantum_data.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/quantum_data.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Building off of the comparisons made in the MNIST tutorial, this tutorial explores the recent work of Huang et al. that shows how different datasets affect performance comparisons. In the work, the authors seek to understand how and when classical machine learning models can learn as well as (or better than) quantum models. The work also showcases an empirical performance separation between classical and quantum machine learning model via a carefully crafted dataset. You will:
Prepare a reduced dimension Fashion-MNIST dataset.
Use quantum circuits to re-label the dataset and compute Projected Quantum Kernel features (PQK).
Train a classical neural network on the re-labeled dataset and compare the performance with a model that has access to the PQK features.
Setup
End of explanation
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# Rescale the images from [0,255] to the [0.0,1.0] range.
x_train, x_test = x_train/255.0, x_test/255.0
print("Number of original training examples:", len(x_train))
print("Number of original test examples:", len(x_test))
"""
Explanation: 1. Data preparation
You will begin by preparing the fashion-MNIST dataset for running on a quantum computer.
1.1 Download fashion-MNIST
The first step is to get the traditional fashion-mnist dataset. This can be done using the tf.keras.datasets module.
End of explanation
"""
def filter_03(x, y):
    """Keep only class-0 and class-3 examples; relabel y as True for class 0."""
    mask = (y == 0) | (y == 3)
    kept_x = x[mask]
    # Boolean labels: True for T-shirts/tops (class 0), False for dresses (class 3).
    kept_y = y[mask] == 0
    return kept_x, kept_y
x_train, y_train = filter_03(x_train, y_train)
x_test, y_test = filter_03(x_test, y_test)
print("Number of filtered training examples:", len(x_train))
print("Number of filtered test examples:", len(x_test))
print(y_train[0])
plt.imshow(x_train[0, :, :])
plt.colorbar()
"""
Explanation: Filter the dataset to keep just the T-shirts/tops and dresses, remove the other classes. At the same time convert the label, y, to boolean: True for 0 and False for 3.
End of explanation
"""
def truncate_x(x_train, x_test, n_components=10):
    """Perform PCA on image dataset keeping the top `n_components` components.

    The basis (eigenvectors of the centered train-set Gram matrix X^T X) is fit
    on `x_train` only; `x_test` is centered with the *train* mean and projected
    onto the same basis, avoiding test-set leakage.
    """
    n_points_train = tf.gather(tf.shape(x_train), 0)
    n_points_test = tf.gather(tf.shape(x_test), 0)
    # Flatten to 1D
    x_train = tf.reshape(x_train, [n_points_train, -1])
    x_test = tf.reshape(x_test, [n_points_test, -1])
    # Normalize.
    feature_mean = tf.reduce_mean(x_train, axis=0)
    x_train_normalized = x_train - feature_mean
    x_test_normalized = x_test - feature_mean
    # Truncate. `eigh` returns eigenvalues in ascending order, so the last
    # `n_components` eigenvector columns are the top principal directions.
    e_values, e_vectors = tf.linalg.eigh(
        tf.einsum('ji,jk->ik', x_train_normalized, x_train_normalized))
    return tf.einsum('ij,jk->ik', x_train_normalized, e_vectors[:,-n_components:]), \
        tf.einsum('ij,jk->ik', x_test_normalized, e_vectors[:, -n_components:])
DATASET_DIM = 10
x_train, x_test = truncate_x(x_train, x_test, n_components=DATASET_DIM)
print(f'New datapoint dimension:', len(x_train[0]))
"""
Explanation: 1.2 Downscale the images
Just like the MNIST example, you will need to downscale these images in order to be within the boundaries for current quantum computers. This time however you will use a PCA transformation to reduce the dimensions instead of a tf.image.resize operation.
End of explanation
"""
N_TRAIN = 1000
N_TEST = 200
x_train, x_test = x_train[:N_TRAIN], x_test[:N_TEST]
y_train, y_test = y_train[:N_TRAIN], y_test[:N_TEST]
print("New number of training examples:", len(x_train))
print("New number of test examples:", len(x_test))
"""
Explanation: The last step is to reduce the size of the dataset to just 1000 training datapoints and 200 testing datapoints.
End of explanation
"""
def single_qubit_wall(qubits, rotations):
    """Prepare a single qubit X,Y,Z rotation wall on `qubits`.

    Args:
        qubits: iterable of cirq qubits to act on.
        rotations: array-like of shape (n_qubits, 3); rotations[i][j] is the
            exponent applied to the X, Y, Z gate (in that order) on qubit i.

    Returns:
        A cirq.Circuit applying X**r0, Y**r1, Z**r2 to every qubit in order.
    """
    wall_circuit = cirq.Circuit()
    for i, qubit in enumerate(qubits):
        for j, gate in enumerate([cirq.X, cirq.Y, cirq.Z]):
            wall_circuit.append(gate(qubit) ** rotations[i][j])
    return wall_circuit
"""
Explanation: 2. Relabeling and computing PQK features
You will now prepare a "stilted" quantum dataset by incorporating quantum components and re-labeling the truncated fashion-MNIST dataset you've created above. In order to get the most seperation between quantum and classical methods, you will first prepare the PQK features and then relabel outputs based on their values.
2.1 Quantum encoding and PQK features
You will create a new set of features, based on x_train, y_train, x_test and y_test that is defined to be the 1-RDM on all qubits of:
$V(x_{\text{train}} / n_{\text{trotter}}) ^ {n_{\text{trotter}}} U_{\text{1qb}} | 0 \rangle$
Where $U_\text{1qb}$ is a wall of single qubit rotations and $V(\hat{\theta}) = e^{-i\sum_i \hat{\theta_i} (X_i X_{i+1} + Y_i Y_{i+1} + Z_i Z_{i+1})}$
First, you can generate the wall of single qubit rotations:
End of explanation
"""
SVGCircuit(single_qubit_wall(
cirq.GridQubit.rect(1,4), np.random.uniform(size=(4, 3))))
"""
Explanation: You can quickly verify this works by looking at the circuit:
End of explanation
"""
def v_theta(qubits):
    r"""Prepares a circuit that generates V(\theta).

    Builds a parametrized exponential of nearest-neighbour Heisenberg-style
    terms (XX + YY + ZZ) over consecutive qubit pairs, with one free sympy
    symbol per pair.

    Returns:
        A (circuit, symbols) tuple where `symbols` are ref_0 .. ref_{n-2}.
    """
    ref_paulis = [
        cirq.X(q0) * cirq.X(q1) + \
        cirq.Y(q0) * cirq.Y(q1) + \
        cirq.Z(q0) * cirq.Z(q1) for q0, q1 in zip(qubits, qubits[1:])
    ]
    exp_symbols = list(sympy.symbols('ref_0:'+str(len(ref_paulis))))
    return tfq.util.exponential(ref_paulis, exp_symbols), exp_symbols
"""
Explanation: Next you can prepare $V(\hat{\theta})$ with the help of tfq.util.exponential which can exponentiate any commuting cirq.PauliSum objects:
End of explanation
"""
test_circuit, test_symbols = v_theta(cirq.GridQubit.rect(1, 2))
print(f'Symbols found in circuit:{test_symbols}')
SVGCircuit(test_circuit)
"""
Explanation: This circuit might be a little bit harder to verify by looking at, but you can still examine a two qubit case to see what is happening:
End of explanation
"""
def prepare_pqk_circuits(qubits, classical_source, n_trotter=10):
    """Prepare the pqk feature circuits around a dataset.

    For every datapoint, builds a fixed random single-qubit rotation wall
    followed by `n_trotter` repetitions of the parametrized V circuit, then
    resolves V's symbols from the datapoint's features.

    Args:
        qubits: qubits the circuits act on.
        classical_source: 2-D array of datapoints, one row per point.
        n_trotter: number of trotter repetitions of V to append.

    Returns:
        A `tf.Tensor` of serialized, fully-resolved circuits (one per point).
    """
    n_qubits = len(qubits)
    n_points = len(classical_source)
    # Prepare random single qubit rotation wall.
    random_rots = np.random.uniform(-2, 2, size=(n_qubits, 3))
    initial_U = single_qubit_wall(qubits, random_rots)
    # Prepare parametrized V
    V_circuit, symbols = v_theta(qubits)
    exp_circuit = cirq.Circuit(V_circuit for t in range(n_trotter))
    # Convert to `tf.Tensor`
    initial_U_tensor = tfq.convert_to_tensor([initial_U])
    initial_U_splat = tf.tile(initial_U_tensor, [n_points])
    full_circuits = tfq.layers.AddCircuit()(
        initial_U_splat, append=exp_circuit)
    # Replace placeholders in circuits with values from `classical_source`.
    # The features are rescaled by (n_qubits / 3) / n_trotter — dividing by
    # n_trotter spreads the encoded angle over the trotter steps; presumably
    # n_qubits/3 compensates for the feature dimension (TODO confirm).
    return tfq.resolve_parameters(
        full_circuits, tf.convert_to_tensor([str(x) for x in symbols]),
        tf.convert_to_tensor(classical_source*(n_qubits/3)/n_trotter))
"""
Explanation: Now you have all the building blocks you need to put your full encoding circuits together:
End of explanation
"""
qubits = cirq.GridQubit.rect(1, DATASET_DIM + 1)
q_x_train_circuits = prepare_pqk_circuits(qubits, x_train)
q_x_test_circuits = prepare_pqk_circuits(qubits, x_test)
"""
Explanation: Choose some qubits and prepare the data encoding circuits:
End of explanation
"""
def get_pqk_features(qubits, data_batch):
    """Get PQK features based on above construction.

    Measures the X, Y and Z expectation values of every qubit for each
    circuit in `data_batch` (the 1-RDM of the encoded states).

    Returns:
        A `tf.Tensor` of shape [n_points, n_qubits, 3]; entry [i][j][k] is
        <psi_i| OP_k(qubit j) |psi_i> with OP_k in (X, Y, Z).
    """
    ops = [[cirq.X(q), cirq.Y(q), cirq.Z(q)] for q in qubits]
    ops_tensor = tf.expand_dims(tf.reshape(tfq.convert_to_tensor(ops), -1), 0)
    batch_dim = tf.gather(tf.shape(data_batch), 0)
    # Tile the operator list so every circuit in the batch is measured the same way.
    ops_splat = tf.tile(ops_tensor, [batch_dim, 1])
    exp_vals = tfq.layers.Expectation()(data_batch, operators=ops_splat)
    rdm = tf.reshape(exp_vals, [batch_dim, len(qubits), -1])
    return rdm
x_train_pqk = get_pqk_features(qubits, q_x_train_circuits)
x_test_pqk = get_pqk_features(qubits, q_x_test_circuits)
print('New PQK training dataset has shape:', x_train_pqk.shape)
print('New PQK testing dataset has shape:', x_test_pqk.shape)
"""
Explanation: Next, compute the PQK features based on the 1-RDM of the dataset circuits above and store the results in rdm, a tf.Tensor with shape [n_points, n_qubits, 3]. The entries in rdm[i][j][k] = $\langle \psi_i | OP^k_j | \psi_i \rangle$ where i indexes over datapoints, j indexes over qubits and k indexes over $\lbrace \hat{X}, \hat{Y}, \hat{Z} \rbrace$ .
End of explanation
"""
def compute_kernel_matrix(vecs, gamma):
    """Computes d[i][j] = scaled_gamma * ||vecs[i] - vecs[j]||^2.

    NOTE(review): despite the RBF-like signature, no exponential is applied —
    the return value is the (scaled) pairwise squared-distance matrix, with
    scaled_gamma = gamma / (n_features * std(vecs)).
    """
    scaled_gamma = gamma / (
        tf.cast(tf.gather(tf.shape(vecs), 1), tf.float32) * tf.math.reduce_std(vecs))
    return scaled_gamma * tf.einsum('ijk->ij',(vecs[:,None,:] - vecs) ** 2)
def get_spectrum(datapoints, gamma=1.0):
    """Compute the eigenvalues and eigenvectors of the kernel of datapoints."""
    KC_qs = compute_kernel_matrix(datapoints, gamma)
    S, V = tf.linalg.eigh(KC_qs)
    # Clamp via abs: eigh can return slightly negative eigenvalues for a
    # near-PSD matrix, which would break the sqrt taken downstream.
    S = tf.math.abs(S)
    return S, V
S_pqk, V_pqk = get_spectrum(
tf.reshape(tf.concat([x_train_pqk, x_test_pqk], 0), [-1, len(qubits) * 3]))
S_original, V_original = get_spectrum(
tf.cast(tf.concat([x_train, x_test], 0), tf.float32), gamma=0.005)
print('Eigenvectors of pqk kernel matrix:', V_pqk)
print('Eigenvectors of original kernel matrix:', V_original)
"""
Explanation: 2.2 Re-labeling based on PQK features
Now that you have these quantum generated features in x_train_pqk and x_test_pqk, it is time to re-label the dataset. To achieve maximum seperation between quantum and classical performance you can re-label the dataset based on the spectrum information found in x_train_pqk and x_test_pqk.
Note: This preparation of your dataset to explicitly maximize the separation in performance between the classical and quantum models might feel like cheating, but it provides a very important proof of existence for datasets that are hard for classical computers and easy for quantum computers to model. There would be no point in searching for quantum advantage in QML if you couldn't first create something like this to demonstrate advantage.
End of explanation
"""
def get_stilted_dataset(S, V, S_2, V_2, lambdav=1.1):
    """Prepare new labels that maximize geometric distance between kernels.

    Args:
        S, V: eigenvalues/eigenvectors of the PQK kernel matrix.
        S_2, V_2: eigenvalues/eigenvectors of the original-data kernel matrix.
        lambdav: regularization constant applied to the second spectrum.

    Returns:
        A boolean numpy array of relabeled targets with ~5% label noise.
    """
    S_diag = tf.linalg.diag(S ** 0.5)
    S_2_diag = tf.linalg.diag(S_2 / (S_2 + lambdav) ** 2)
    scaling = S_diag @ tf.transpose(V) @ \
        V_2 @ S_2_diag @ tf.transpose(V_2) @ \
        V @ S_diag
    # Generate new labels using the largest eigenvector.
    # NOTE(review): tf.linalg.eig returns eigenvectors as *columns*; vecs[-1]
    # selects the last row, and eig's ordering is not sorted — confirm this
    # picks the intended eigenvector (vs. vecs[:, -1]).
    _, vecs = tf.linalg.eig(scaling)
    new_labels = tf.math.real(
        tf.einsum('ij,j->i', tf.cast(V @ S_diag, tf.complex64), vecs[-1])).numpy()
    # Create new labels and add some small amount of noise.
    # Threshold at the median for balanced classes, then flip ~5% of labels.
    final_y = new_labels > np.median(new_labels)
    noisy_y = (final_y ^ (np.random.uniform(size=final_y.shape) > 0.95))
    return noisy_y
y_relabel = get_stilted_dataset(S_pqk, V_pqk, S_original, V_original)
y_train_new, y_test_new = y_relabel[:N_TRAIN], y_relabel[N_TRAIN:]
"""
Explanation: Now you have everything you need to re-label the dataset! Now you can consult with the flowchart to better understand how to maximize performance seperation when re-labeling the dataset:
<img src="./images/quantum_data_1.png">
In order to maximize the separation between quantum and classical models, you will attempt to maximize the geometric difference between the original dataset and the PQK features kernel matrices $g(K_1 || K_2) = \sqrt{ || \sqrt{K_2} K_1^{-1} \sqrt{K_2} || _\infty}$ using S_pqk, V_pqk and S_original, V_original. A large value of $g$ ensures that you initially move to the right in the flowchart down towards a prediction advantage in the quantum case.
Note: Computing quantities for $s$ and $d$ are also very useful when looking to better understand performance seperations. In this case ensuring a large $g$ value is enough to see performance seperation.
End of explanation
"""
#docs_infra: no_execute
def create_pqk_model():
    """Build a small feedforward classifier over the flattened PQK features.

    Input width is len(qubits) * 3 (X/Y/Z expectation per qubit); the final
    Dense(1) outputs a logit (no activation).
    """
    return tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation='sigmoid', input_shape=[len(qubits) * 3,]),
        tf.keras.layers.Dense(16, activation='sigmoid'),
        tf.keras.layers.Dense(1),
    ])
pqk_model = create_pqk_model()
pqk_model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(learning_rate=0.003),
metrics=['accuracy'])
pqk_model.summary()
#docs_infra: no_execute
pqk_history = pqk_model.fit(tf.reshape(x_train_pqk, [N_TRAIN, -1]),
y_train_new,
batch_size=32,
epochs=1000,
verbose=0,
validation_data=(tf.reshape(x_test_pqk, [N_TEST, -1]), y_test_new))
"""
Explanation: 3. Comparing models
Now that you have prepared your dataset it is time to compare model performance. You will create two small feedforward neural networks and compare performance when they are given access to the PQK features found in x_train_pqk.
3.1 Create PQK enhanced model
Using standard tf.keras library features you can now create and a train a model on the x_train_pqk and y_train_new datapoints:
End of explanation
"""
#docs_infra: no_execute
def create_fair_classical_model():
    """Build a classical baseline of the same size over the raw PCA features.

    Mirrors `create_pqk_model` (32 -> 16 -> 1 logit) but takes the
    DATASET_DIM-dimensional classical input instead of the PQK features.
    """
    return tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation='sigmoid', input_shape=[DATASET_DIM,]),
        tf.keras.layers.Dense(16, activation='sigmoid'),
        tf.keras.layers.Dense(1),
    ])
model = create_fair_classical_model()
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(learning_rate=0.03),
metrics=['accuracy'])
model.summary()
#docs_infra: no_execute
classical_history = model.fit(x_train,
y_train_new,
batch_size=32,
epochs=1000,
verbose=0,
validation_data=(x_test, y_test_new))
"""
Explanation: 3.2 Create a classical model
Similar to the code above you can now also create a classical model that doesn't have access to the PQK features in your stilted dataset. This model can be trained using x_train and y_label_new.
End of explanation
"""
#docs_infra: no_execute
plt.figure(figsize=(10,5))
plt.plot(classical_history.history['accuracy'], label='accuracy_classical')
plt.plot(classical_history.history['val_accuracy'], label='val_accuracy_classical')
plt.plot(pqk_history.history['accuracy'], label='accuracy_quantum')
plt.plot(pqk_history.history['val_accuracy'], label='val_accuracy_quantum')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
"""
Explanation: 3.3 Compare performance
Now that you have trained the two models you can quickly plot the performance gaps in the validation data between the two. Typically both models will achieve > 0.9 accuaracy on the training data. However on the validation data it becomes clear that only the information found in the PQK features is enough to make the model generalize well to unseen instances.
End of explanation
"""
|
RadoslawDryzner/LeRepoDuGuerrier | Homework02/Homework 2.ipynb | mit | # Import libraries
import requests
from bs4 import BeautifulSoup
import json
import math
import time
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
"""
Explanation: First, we import all the needed libraries.
End of explanation
"""
r = requests.get('https://www.topuniversities.com/sites/default/files/qs-rankings-data/357051.txt?_=1508259845358')
raw_data = json.loads(r.text)['data'][:200]
"""
Explanation: 1. Top-ranking universities
We noticed that the actual data from topuniversities is not directly on the webpage, but on a separate text file, in JSON format.
Thus, we first get this JSON, parse it, and take the first 200 entries in it.
We noticed that the university with rank 199 is actually the 198th entry, and thus the last 3 universities need to have their rank corrected.
End of explanation
"""
raw_data[0]
"""
Explanation: We can print the first entry of the data to see how the informations are represented.
End of explanation
"""
def process_university(uni):
    """Flatten one raw ranking JSON entry into a single info dict.

    Combines the fields present in the entry (name, corrected rank, country,
    region) with the faculty/student counts scraped from the university's
    detail page via `get_numbers`.
    """
    info = {
        'name': uni['title'],
        'rank': get_rank(uni['rank_display']),
        'country': uni['country'],
        'region': uni['region'],
    }
    # Merge in total/international faculty and student counts.
    info.update(get_numbers(uni['url']))
    return info
"""
Explanation: We can now define functions that will help us during the processing of this JSON.
First, process_university takes as input the raw JSON of a particular university, and outputs a dictionary containing the name, rank, country, region, number of faculty members (international and total) and number of students (international and total) for that given university.
It uses other functions defined below.
End of explanation
"""
def get_rank(rank_display):
    """Turn a displayed rank string (e.g. '=12' for ties) into a corrected int.

    Displayed ranks of 199 and above are shifted down by one to undo the
    off-by-one present in the source data for the last few entries.
    """
    value = int(rank_display.replace("=", ""))
    return value - 1 if value >= 199 else value
"""
Explanation: As there can be ties in rank, the displayed rank is not always a integer. Furthermore, as said above, the last 3 universities have incorrect ranks and need to be fixed.
End of explanation
"""
def get_numbers(url):
    """Scrape faculty and student counts from a topuniversities.com detail page.

    Args:
        url: path component of the university's page, appended to the site root.

    Returns:
        A dict with keys 'total_faculty', 'international_faculty',
        'total_student', 'international_student'. Values fall back to NaN when
        the page doesn't expose them (e.g. NYU uses a different template).
    """
    r = requests.get("https://www.topuniversities.com/" + url)
    soup = BeautifulSoup(r.text, 'html.parser')
    faculty_info = soup.select(".text .number")
    if len(faculty_info) >= 2:
        total_faculty = parse_int(faculty_info[0].decode_contents(formatter="html"))
        international_faculty = parse_int(faculty_info[1].decode_contents(formatter="html"))
    else:
        total_faculty = math.nan
        international_faculty = math.nan
    student_info = soup.select(".barp .number")
    # Bug fix: guard on the *student* elements before indexing them — the
    # original checked len(faculty_info) here, which could raise IndexError
    # (or use stale data) when the student section is missing.
    if len(student_info) >= 2:
        total_student = parse_int(student_info[0].decode_contents(formatter="html"))
        international_student = parse_int(student_info[1].decode_contents(formatter="html"))
    else:
        total_student = math.nan
        international_student = math.nan
    return {'total_faculty' : total_faculty, 'international_faculty' : international_faculty, 'total_student' : total_student, 'international_student' : international_student}
def parse_int(text):
    """Parse a scraped number string into an int.

    Strips newlines, spaces and thousands-separator commas before conversion.
    The parameter was renamed from `str` to avoid shadowing the builtin;
    existing call sites pass it positionally.
    """
    return int(text.replace("\n", "").replace(" ", "").replace(",", ""))
"""
Explanation: To get the number of faculty members (international and total) and number of students (international and total), we need to get another request, and this time, we will need to parse the webpage using BeautifulSoup.
By inspecting the webpage, we noticed the classes of the elements where the numbers are contained. Once we get these elements, we further need to parse its content, to get the value as an integer.
During the parsing, we noticed that one university (NYU) did not have the same template as the others, and so its number of students is unknown.
End of explanation
"""
# Uncomment and run this if you want to regenerate the JSON
"""unis1 = []
for uni in raw_data:
unis1.append(process_university(uni))
with open('data1.json', 'w') as f:
json.dump(unis1, f)"""
with open('data1.json', 'r') as f:
unis1 = json.load(f)
df = pd.DataFrame(unis1)
df
"""
Explanation: We put the gathered and parsed data of the universities in a new JSON file for later reuse so that we don't have to generate new requests every time we run the notebook. Then we create a dataframe from this data and display it.
End of explanation
"""
df['staff_student_ratio'] = df['total_faculty'] / df['total_student']
df.sort_values(['staff_student_ratio'], ascending=[False])[['name', 'rank', 'staff_student_ratio']]
"""
Explanation: Rank according to the ratio between faculty members and students
End of explanation
"""
df.sort_values('staff_student_ratio', ascending=False)[['name', 'staff_student_ratio']].head(10).plot(title='Rank according to the ratio between faculty members and students',
figsize=(13,6),kind='bar', x = 'name')
"""
Explanation: For clarity, we only show the universities with the 10 highest ratio.
End of explanation
"""
df['international_student_ratio'] = df['international_student'] / df['total_student']
df.sort_values(['international_student_ratio'], ascending=[False])[['name', 'rank', 'international_student_ratio']]
"""
Explanation: We note that most of the universities with a high ratio are also highly ranked.
Rank according to the ratio of international students
End of explanation
"""
df.sort_values('international_student_ratio', ascending=False)[['name', 'international_student_ratio']].head(10).plot(title='Rank according to the ratio of international students',figsize=(13,6), kind='bar', x = 'name')
"""
Explanation: For clarity, we only show the universities with the 10 highest ratio.
End of explanation
"""
df_staff_country = df.groupby('country').mean().sort_values('staff_student_ratio', ascending=False)[['staff_student_ratio']]
df_staff_country
df_staff_country['staff_student_ratio'].plot(title='Rank according to the ratio between faculty members and students, grouped by country', figsize=(16,6),kind='bar')
"""
Explanation: We can note that again, a lot of higly ranked universities also have a high ratio, but in this case, it's not as much apparent as in the previous ratio.
Rank according to the ratio between faculty members and students, grouped by country
End of explanation
"""
df_staff_region = df.groupby('region').mean().sort_values('staff_student_ratio', ascending=False)[['staff_student_ratio']]
df_staff_region
df_staff_region['staff_student_ratio'].plot(title = 'Rank according to the ratio between faculty members and students, grouped by region',figsize=(13,6),kind='bar')
"""
Explanation: Here we can note some surprising results. A lot of countries that have high ratios do not seem to have a lot of universities that are highly ranked. In fact we believe that for these countries, only a few or even a single university is in the ranking. Given that the university would be the best university in that country, it is understandable that their ratio is high.
Rank according to the ratio between faculty members and students, grouped by region
End of explanation
"""
df_int_country = df.groupby('country').mean().sort_values('international_student_ratio', ascending=False)[['international_student_ratio']]
df_int_country
df_int_country['international_student_ratio'].plot(title='Rank according to the ratio of international students, grouped by country', figsize=(16,6),kind='bar')
"""
Explanation: Here we note that the highest ratios seem to be in regions that have the most of highly ranked universities.
Rank according to the ratio of international students, grouped by country
End of explanation
"""
df_int_region = df.groupby('region').mean().sort_values('international_student_ratio', ascending=False)[['international_student_ratio']]
df_int_region
df_int_region['international_student_ratio'].plot(title='Rank according to the ratio of international students, grouped by region', figsize=(13,6),kind='bar')
"""
Explanation: We note that the countries that seem to have the highest ratios of international students also seem to be the most attractive in terms of their location and/or wealth.
Rank according to the ratio of international students, grouped by region
End of explanation
"""
country_region = dict(df[['country', 'region']].groupby(['country', 'region']).groups.keys())
"""
Explanation: The results by region seem to confirm the results by country. Again, attractive locations and rich countries seem to attract the most international students. Latin America and Asia however are an exception.
For the next part, where we look at the rankings of universities according to Times Higher Education, we need to define a mapping of countries and regions, since the second rankings do not contain region data and we would like to have it in our tables.
End of explanation
"""
r2 = requests.get('https://www.timeshighereducation.com/sites/default/files/the_data_rankings/world_university_rankings_2018_limit0_369a9045a203e176392b9fb8f8c1cb2a.json')
raw_data2 = json.loads(r2.text)['data'][:200]
"""
Explanation: 2. With Times Higher Education
Similarly to the previous part, we notice that all the relevant data can be obtained from a simple text file in JSON format. Moreover we see that the ratios we are interested in are already present in this file and need not be calculated.
End of explanation
"""
unis2 = []
for uni in raw_data2:
name = uni['name']
rank = uni['rank'].replace('=', '')
country = uni['location']
if country == 'Russian Federation':
country = 'Russia'
int_students = uni['stats_pc_intl_students'].replace('%', '')
staff_student = uni['stats_student_staff_ratio']
info = {'name' : name, 'rank': rank, 'country': country, 'region' : country_region.get(country, 'Europe'),
'international_student_ratio' : int(int_students) / 100.0, 'staff_student_ratio': 1 / float(staff_student)}
unis2.append(info)
"""
Explanation: When loading the university data from the webpage, we have to adapt it so that it matches the format from the other ranking. One problem we have is that in Times Higher Education, Russia is referred to as the Russian Federation, so we change it back to Russia. Moreover in the dictionary we created for the countries and regions, we do not have a region value for Luxemburg since it doesn't appear in the first ranking. So we manually put the region to Europe.
End of explanation
"""
# Uncomment and run this if you want to regenerate the JSON
"""with open('data2.json', 'w') as f:
json.dump(unis2, f)"""
with open('data2.json', 'r') as f:
all_unis2 = json.load(f)
df2 = pd.DataFrame(all_unis2)
df2
"""
Explanation: As before, we put our data in a new JSON file to avoid reloading the file each time we run the notebook and generating new requests.
End of explanation
"""
df2[['name', 'staff_student_ratio']].sort_values('staff_student_ratio', ascending=False)
"""
Explanation: Rank according to the ratio between faculty members and students
End of explanation
"""
df2.sort_values('staff_student_ratio', ascending=False)[['name', 'staff_student_ratio']].head(10).plot(title='Rank according to the ratio between faculty members and students', figsize=(13,6),kind='bar', x = 'name')
"""
Explanation: For clarity, we only show the universities with the 10 highest ratio.
End of explanation
"""
df2.sort_values('international_student_ratio', ascending=False)[['name', 'international_student_ratio']]
"""
Explanation: For this ranking, we see quite a difference with the previous ranking. Here, universities with a high ratio are not necessarily the universities with the highest ranking.
Rank according to the ratio of international students
End of explanation
"""
df2.sort_values('international_student_ratio', ascending=False)[['name', 'international_student_ratio']].head(10).plot(title='Rank according to the ratio of international students', figsize=(13,6),kind='bar', x = 'name')
"""
Explanation: For clarity, we only show the universities with the 10 highest ratio.
End of explanation
"""
df_staff_country2 = df2.groupby('country').mean().sort_values('staff_student_ratio', ascending=False)[['staff_student_ratio']]
df_staff_country2
df_staff_country2['staff_student_ratio'].plot(title='Rank according to the ratio between faculty members and students, grouped by country', figsize=(16,6),kind='bar')
"""
Explanation: The results for this ranking seem to be very similar to the other ranking with the exception of additional unversities that were not present in the other ranking.
Rank according to the ratio between faculty members and students, grouped by country
End of explanation
"""
df_staff_region2 = df2.groupby('region').mean().sort_values('staff_student_ratio', ascending=False)[['staff_student_ratio']]
df_staff_region2
df_staff_region2['staff_student_ratio'].plot(title='Rank according to the ratio between faculty members and students, grouped by region', figsize=(13,6),kind='bar')
"""
Explanation: We note similar results as in the first ranking.
Rank according to the ratio between faculty members and students, grouped by region
End of explanation
"""
df_int_country2 = df2.groupby('country').mean().sort_values('international_student_ratio', ascending=False)[['international_student_ratio']]
df_int_country2
df_int_country2['international_student_ratio'].plot(title='Rank according to the ratio of international students, grouped by country', figsize=(16,6),kind='bar')
"""
Explanation: We note a few differences in this result when comparing to the first ranking. First, we see that Africa is now in second place. What happened is that all the regions seem to have a lower average ratio than in the previous ranking, but Africa's average reduced the least. Moreober, we note the dissapearance of Latin America.
Rank according to the ratio of international students, grouped by country
End of explanation
"""
df_int_region2 = df2.groupby('region').mean().sort_values('international_student_ratio', ascending=False)[['international_student_ratio']]
df_int_region2
df_int_region2['international_student_ratio'].plot(title='Rank according to the ratio of international students, grouped by region', figsize=(13,6),kind='bar')
"""
Explanation: The results here are very similar than the previous ranking with the exception of the addition of Luxemburg, which was not present in the previous ranking.
Rank according to the ratio of international students, grouped by region
End of explanation
"""
# Cache of university name -> URL, pre-loaded from disk so repeated notebook
# runs do not have to query Google again.
mapping = {}
with open('mapping.json', 'r') as f:
    mapping = json.load(f)
def get_url(name):
    """Google-search *name* and return the host+path of the first result.

    Sleeps 5 seconds per call to avoid being rate-limited by Google.
    """
    r = requests.get('https://encrypted.google.com/search?q=' + name.replace(' ', '+')) # request the Google results page
    soup = BeautifulSoup(r.text, 'html.parser')
    google_url = soup.select('.g a')[0]['href'] # we get the first Google result
    url = google_url[google_url.find("://")+3:google_url.find("&")] # we get the URL of the first result
    # Normalise away a trailing slash so URLs compare equal across sources.
    if url.endswith("/"):
        url = url[0:-1]
    time.sleep(5) # the wait needed to avoid getting blocked by Google
    return url
def get_identifier(name):
    """Return the cached Google-derived URL for *name*, searching only on a cache miss."""
    # Only hit Google (via get_url, which sleeps 5 s per call) the first time
    # a given name is seen; afterwards serve from the shared ``mapping`` dict.
    if name not in mapping:
        mapping[name] = get_url(name)
    return mapping[name]
"""
Explanation: The results here are again very similar to the first ranking.
3. Merging of the two dataframes
For the third question, the goal is to merge both dataframes, and thus, as sometimes the university names are not the same in both websites, we need to map them to the same value for both to perform the merge correctly.
To solve this problem, we used Google search : we search for each university names in both dataframes, and take the first link output by Google (usually the university website), and then use this URL to perform the join.
As Google blocks any device that performs too many searches, we delayed each search by 5 seconds, and then store the mapping in a JSON file to avoid searching again each time the notebook is run.
With this technique, we have a very high rate of success, with only 3 universities for which both searches didn't output the same link. These universities were fixed manually so if you save the automatically generated JSON again below, you will not have the same mapping as we do in the file that is present in the repository.
End of explanation
"""
df['url'] = df['name'].apply(get_identifier)
df2['url'] = df2['name'].apply(get_identifier)
raw_merge = df.merge(df2[['url', 'rank', 'international_student_ratio', 'staff_student_ratio']], how='inner', on='url')
raw_merge.columns = ['Country',
'International faculty',
'International students',
'Name',
'Rank topuniversities',
'Region',
'Total faculty',
'Total students',
'Faculty / students ratio topuniversities',
'International students ratio topuniversities',
'url',
'Rank timeshighereducation',
'International students ratio timeshighereducation',
'Faculty / students ratio timeshighereducation']
raw_merge
# Uncomment and run this if you want to save the JSON
"""with open('mapping.json', 'w') as f:
json.dump(mapping, f)"""
"""
Explanation: Now that we have the mapping, we can apply it to both datasets and then merge them on the 'url' column.
End of explanation
"""
raw_merge_cleaned = raw_merge.copy()
# Reverse the ranks so that a larger value means a better university; this is
# needed to obtain meaningful (positive) correlations below.
raw_merge_cleaned['rank_time'] = 200 - raw_merge_cleaned['Rank timeshighereducation'].astype(int)
raw_merge_cleaned['rank_top'] = 200 - raw_merge_cleaned['Rank topuniversities'].astype(int)
# Drop the rows missing the international-student ratio.  Bug fix: the original
# line built the filtered frame but never assigned it back, so no row was
# actually removed even though the text says rows are dropped.
raw_merge_cleaned = raw_merge_cleaned[pd.notnull(raw_merge_cleaned['International students ratio topuniversities'])]
# Extra ratio (Top Universities data only) used in the final scoring step.
raw_merge_cleaned['International faculty ratio'] = raw_merge_cleaned['International faculty'] / raw_merge_cleaned['Total faculty']
raw_merge_cleaned.corr()
"""
Explanation: 4. Find useful insights in the data
We begin by cleaning and adapting our dataset to better see meaningful correlations. For example, we put the ranking of each unversity in the reverse order, since we want to have a higher value for higher ranked universities to get meaningful correlations. Moreover we drop some rows where we miss the data we need. We also add another ratio that we thought to be usefull for the last part of the homework, the international faculy ratio for each unversity.
End of explanation
"""
# Correlations restricted to the ratio columns and the (reversed) ranks.
ratio_rank_cols = [
    'Faculty / students ratio topuniversities',
    'International students ratio topuniversities',
    'International students ratio timeshighereducation',
    'Faculty / students ratio timeshighereducation',
    'International faculty ratio',
    'rank_time',
    'rank_top',
]
raw_merge_cleaned[ratio_rank_cols].corr()
"""
Explanation: We notice that there is an obvious and natural correlation between the columns corresponding to the numbers of students and faculty members. Of course, it's normal than when there are more students, there are more international students as well and more faculty members. These correlations however are not interesting.
Another uninsteresting observation are the correlations between the ratios in the two rankings.
The more interesting observations are given by the ratios than the numbers.
End of explanation
"""
# Same correlation matrix, but over the raw headcounts instead of the ratios.
headcount_cols = [
    'Total students',
    'Total faculty',
    'International faculty',
    'International students',
    'rank_time',
    'rank_top',
]
raw_merge_cleaned[headcount_cols].corr()
"""
Explanation: When looking at only the correlations involving the ratios and ranks, we can note a few interesting things. First note the additional ratio that is the ratio of international faculty members (this information comes from the Top Universities ranking only).
First, we can note that the faculty/students ratio from Top Universities has a relatively high correlation with both ranks. There are two possible explanations in our opinion. First we can think that higher ranked universities may have more funds to hire more faculty staff. Secondly, we can also assume that universities with a higher rank may want to keep their high position or improve it by hiring more faculty members.
Another relatively important correlation we see is the correlation between international student ratio and both ranks. This can surely be explained by the attractiveness of high ranked universities to international students.
The highest correlation we observe is the one between international faculty ratio and the international students ratio. Universities that attract international students will also attract international faculty members for probably the same reason.
End of explanation
"""
# Weighted average of the ratio columns plus the reversed ranks normalised to
# [0, 1]; the weights follow the justification given above.  Term order is
# kept identical to the original expression.
score_terms = [
    (0.1, raw_merge_cleaned['Faculty / students ratio topuniversities']),
    (0.1, raw_merge_cleaned['Faculty / students ratio timeshighereducation']),
    (0.025, raw_merge_cleaned['International students ratio topuniversities']),
    (0.025, raw_merge_cleaned['International students ratio timeshighereducation']),
    (0.15, raw_merge_cleaned['International faculty ratio']),
    (0.3, raw_merge_cleaned['rank_time'] / 200.0),
    (0.3, raw_merge_cleaned['rank_top'] / 200.0),
]
raw_merge_cleaned['score'] = sum(weight * column for weight, column in score_terms)
raw_merge_cleaned[['Name', 'score']].sort_values(by='score', ascending=False).reset_index(drop=True)
"""
Explanation: We can still take a quick look at the total numbers and their correlations. In fact we can see that there is almost no correlation between the total number of students and the rank. Moreover, we see also see that the higher the rank, the more faculty members there are in the university. This confirms our previous intuition when we looked at the ratios.
5. Find the best university
For the last question, we have the following approach. We will create new score of the universities based on the different ratio columns for which we have the correlations. Then we will calculate the weighted average of each of these scores for each university which will define the new ranking. The top scored university of this ranking will our best unversity.
The weights we use will be based on the correlations we found earlier and the explanations that we apply to them. We will apply the following weights for each collumns:
* Faculty/student ratio: 0.1 for each ranking. We consider this to be the most important factor for the rank for reasons explained above.
* International student ratios: 0.025 for each ranking. We choose this weight because we consider the ratio to be the mostly the consequence of the ranking: high ranked universities attrach more international students because of their rank.
* International faculty ratio : 0.15 (we source this statistic from only one of the rankings). This is an important factor for us because we believe that a large number of international faculty members may mean that the university is already good and attracts professors from around the world.
* Original ranks (normalized between 0 to 1, 1 being the best): 0.3 for each. Since the original ranks are made from other interesting factors that we have not explored here, it's important to take them into account at a high weight.
End of explanation
"""
|
swirlingsand/deep-learning-foundations | image-classification-project-2/dlnd_image_classification.ipynb | mit | from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
    # tqdm progress bar whose ``hook`` matches the urlretrieve reporthook
    # signature (block number, block size, total size in bytes).
    last_block = 0  # index of the last block reported, used to compute the delta
    def hook(self, block_num=1, block_size=1, total_size=None):
        """Advance the bar by the bytes downloaded since the previous call."""
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
# Download the archive only once, showing progress through DLProgress.
if not isfile('cifar-10-python.tar.gz'):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
        urlretrieve(
            'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
            'cifar-10-python.tar.gz',
            pbar.hook)
# Extract only if the dataset folder is not already present.
if not isdir(cifar10_dataset_folder_path):
    with tarfile.open('cifar-10-python.tar.gz') as tar:
        tar.extractall()
        tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
"""
Explanation: Image Classification
In this project we classify images from the CIFAR-10 dataset. The dataset consists of airplanes, dogs, cats, and other objects.
Get the Data
Run the following cell to download the CIFAR-10 dataset for python.
End of explanation
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 5     # which CIFAR-10 training batch to inspect (valid range 1-5)
sample_id = 55   # index of the image/label pair within that batch
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
"""
Explanation: Explore the Data
The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named data_batch_1, data_batch_2, etc.. Each batch contains the labels and images that are one of the following:
* airplane
* automobile
* bird
* cat
* deer
* dog
* frog
* horse
* ship
* truck
Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the batch_id and sample_id. The batch_id is the id for a batch (1-5). The sample_id is the id for a image and label pair in the batch.
Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions.
End of explanation
"""
def normalize(x):
    """
    Normalize sample image data into the range [0, 1].
    : x: Array (or plain Python list) of image data; channel values are
         integers in [0, 255] and each image has shape (32, 32, 3).
    : return: Numpy array of the same shape with values scaled into [0, 1].
    """
    # Channel values span 0..255, so dividing by 255 maps them onto [0, 1].
    # np.asarray makes the promised "list of image data" work too (a bare
    # Python list cannot be divided by a scalar) while leaving ndarray
    # input un-copied.
    return np.asarray(x) / 255.0
tests.test_normalize(normalize)
"""
Explanation: Implement Preprocess Functions
Normalize
End of explanation
"""
import tensorflow as tf
from sklearn import preprocessing
def one_hot_encode(x):
    """
    One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
    : x: List (or array) of integer labels in the range 0..9.
    : return: Numpy integer array of shape (len(x), 10) with a single 1 per row.
    """
    # Row i of the 10x10 identity matrix is exactly the one-hot vector for
    # label i, so fancy-indexing the identity with the labels encodes them all
    # at once.  This replaces the sklearn LabelBinarizer fit-then-override of
    # ``classes_`` and drops the stray debug print.
    return np.eye(10, dtype=int)[np.asarray(x, dtype=int)]
tests.test_one_hot_encode(one_hot_encode)
"""
Explanation: One-hot encode
End of explanation
"""
# Preprocess Training, Validation, and Testing Data
# Writes the preprocess_*.p pickles to disk; 10% of the training data is
# held out as the validation set (see the explanation above).
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation.
End of explanation
"""
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
# (this pickle was produced by helper.preprocess_and_save_data above)
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
"""
Explanation: Check Point
The preprocessed data has been saved to disk.
End of explanation
"""
import tensorflow as tf
def neural_net_image_input(image_shape):
    """
    Return a Tensor for a batch of image input
    : image_shape: Shape of one image, e.g. (32, 32, 3)
    : return: float32 placeholder named "x" with a dynamic batch dimension.
    """
    # None lets the batch size vary; the remaining dimensions are fixed by the
    # image shape.  (The leftover debug print of image_shape was removed.)
    tensor = tf.placeholder(tf.float32,
                            shape=[None, image_shape[0], image_shape[1], image_shape[2]],
                            name="x")
    return tensor
def neural_net_label_input(n_classes):
    """
    Return a Tensor for a batch of label input
    : n_classes: Number of classes
    : return: float32 placeholder named "y" of shape (batch, n_classes).
    """
    # One-hot labels, one row per sample.  (Leftover debug print removed.)
    y = tf.placeholder(tf.float32, (None, n_classes), name="y")
    return y
def neural_net_keep_prob_input():
    """
    Return a Tensor for keep probability
    : return: scalar float32 placeholder named "keep_prob" (dropout keep rate).
    """
    # No fixed shape: a single scalar fed at run time.
    return tf.placeholder(tf.float32, (None), name="keep_prob")
# Start from a clean graph before running the input-builder unit tests.
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
"""
Explanation: Build the network
Note: None for shapes in TensorFlow allow for a dynamic size.
End of explanation
"""
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
    """
    Apply convolution (with ELU activation) to x_tensor.
    :param x_tensor: TensorFlow Tensor of shape (batch, H, W, channels)
    :param conv_num_outputs: Number of outputs for the convolutional layer
    :param conv_ksize: kernal size 2-D Tuple for the convolutional layer
    :param conv_strides: Stride 2-D Tuple for convolution
    :param pool_ksize: kernal size 2-D Tuple for pool (unused, see note below)
    :param pool_strides: Stride 2-D Tuple for pool (unused, see note below)
    : return: A tensor that represents convolution (and optional pooling) of x_tensor
    """
    in_channels = int(x_tensor.get_shape()[3])
    # Small truncated-normal initial weights; zero bias.
    weights = tf.Variable(tf.truncated_normal([*conv_ksize, in_channels, conv_num_outputs],
                                              mean=0, stddev=.1))
    bias = tf.Variable(tf.zeros(shape=[conv_num_outputs], dtype=tf.float32))
    result = tf.nn.conv2d(x_tensor, weights, strides=[1, *conv_strides, 1], padding="SAME")
    result = result + bias
    # ELU activation instead of ReLU.
    result = tf.nn.elu(result)
    # Max pooling is intentionally disabled: strided convolutions replace it,
    # following the all-convolutional architecture (arXiv:1412.6806, referenced
    # in the training cell below).  The five debug prints of intermediate
    # tensors were removed.
    # result = tf.nn.max_pool(result, [1, *pool_ksize, 1], [1, *pool_strides, 1], padding="SAME")
    return result
# tests.test_con_pool(conv2d_maxpool)
# tests.test_con_pool(conv2d_maxpool)
"""
Explanation: Convolution and Max Pooling Layer
End of explanation
"""
def flatten(x_tensor):
    """
    Flatten x_tensor to (Batch Size, Flattened Image Size).
    : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
    : return: A tensor of size (Batch Size, Flattened Image Size).
    """
    # The contrib helper keeps the batch dimension and collapses the rest.
    flattened = tf.contrib.layers.flatten(x_tensor)
    return flattened
tests.test_flatten(flatten)
"""
Explanation: Flatten Layer
End of explanation
"""
def fully_conn(x_tensor, num_outputs):
    """
    Apply a fully connected layer to x_tensor using weight and bias.
    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of output that the new tensor should be.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
    # contrib's fully_connected creates the weights/bias and applies its
    # default (ReLU) activation.
    dense = tf.contrib.layers.fully_connected(x_tensor, num_outputs)
    return dense
tests.test_fully_conn(fully_conn)
"""
Explanation: Fully-Connected Layer
End of explanation
"""
def output(x_tensor, num_outputs):
    """
    Apply an output layer to x_tensor using weight and bias.
    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of output that the new tensor should be.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
    # activation_fn=None makes this a purely linear layer producing logits.
    logits_layer = tf.contrib.layers.fully_connected(x_tensor,
                                                     num_outputs,
                                                     activation_fn=None)
    return logits_layer
tests.test_output(output)
"""
Explanation: Output Layer
End of explanation
"""
def print_stats(session, feature_batch, label_batch, cost, accuracy):
    """
    Print information about loss and validation accuracy.
    : session: Current TensorFlow session
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : cost: TensorFlow cost function
    : accuracy: TensorFlow accuracy function
    """
    # Dropout is disabled (keep_prob = 1.0) while measuring statistics.
    train_feed = {x: feature_batch, y: label_batch, keep_prob: 1.0}
    valid_feed = {x: valid_features, y: valid_labels, keep_prob: 1.0}
    print("Loss", session.run(cost, feed_dict=train_feed))
    print("Accuracy", session.run(accuracy, feed_dict=train_feed))
    print("Validation", session.run(accuracy, feed_dict=valid_feed))
"""
Explanation: Show Stats
End of explanation
"""
def conv_net(x, keep_prob):
    """
    Create a convolutional neural network model.
    : x: Placeholder tensor that holds image data.
    : keep_prob: Placeholder tensor that hold dropout keep probability.
    : return: Tensor that represents logits (one score per class, 10 classes).
    """
    # Pool parameters are passed but unused: conv2d_maxpool has pooling
    # disabled and downsampling is done by strided convolutions instead.
    pool_ksize = (4, 4)
    pool_strides = (4, 4)
    conv_ksize = (3, 3)
    conv_num_outputs = 96
    conv_strides = (2, 2)
    # Stage 1: two strided 3x3 convolutions with 96 filters, then dropout.
    net = conv2d_maxpool(x, conv_num_outputs,
                         conv_ksize, conv_strides,
                         pool_ksize, pool_strides)
    net = conv2d_maxpool(net, conv_num_outputs,
                         conv_ksize, conv_strides,
                         pool_ksize, pool_strides)
    net = tf.nn.dropout(net, keep_prob)
    # Stage 2: two strided 3x3 convolutions with 192 filters, then dropout.
    conv_num_outputs = 192
    conv_strides = (2, 2)
    net = conv2d_maxpool(net, conv_num_outputs,
                         conv_ksize, conv_strides,
                         pool_ksize, pool_strides)
    net = conv2d_maxpool(net, conv_num_outputs,
                         conv_ksize, conv_strides,
                         pool_ksize, pool_strides)
    net = tf.nn.dropout(net, keep_prob)
    # Stage 3: 1x1 convolutions (channel mixing), then a reduction to 16 maps.
    conv_ksize = (1, 1)
    conv_strides = (1, 1)
    net = conv2d_maxpool(net, conv_num_outputs,
                         conv_ksize, conv_strides,
                         pool_ksize, pool_strides)
    conv_num_outputs = 16
    net = conv2d_maxpool(net, conv_num_outputs,
                         conv_ksize, conv_strides,
                         pool_ksize, pool_strides)
    # Average-pool, flatten and map linearly onto the 10 class logits.
    net = tf.contrib.layers.avg_pool2d(net, [2,2])
    net = flatten(net)
    # Apply an Output Layer
    net = output(net, 10)
    return net
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that it can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer: softmax cross-entropy averaged over the batch, Adam.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=.001).minimize(cost)
# Accuracy: fraction of samples whose arg-max prediction matches the label.
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
"""
Explanation: Create Convolutional Model
End of explanation
"""
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
    """
    Optimize the session on a batch of images and labels.
    : session: Current TensorFlow session
    : optimizer: TensorFlow optimizer function
    : keep_probability: keep probability
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    """
    # A single optimisation step; dropout is active (keep_prob < 1).
    feed = {x: feature_batch, y: label_batch, keep_prob: keep_probability}
    session.run(optimizer, feed_dict=feed)
tests.test_train_nn(train_neural_network)
"""
Explanation: Train the Neural Network
Single Optimization
Implement the function train_neural_network to do a single optimization.
End of explanation
"""
epochs = 10              # full passes over the training data
batch_size = 64          # samples per gradient update
keep_probability = .7    # dropout keep probability used during training
"""
Explanation: Hyperparameters
End of explanation
"""
"""
New test based on:
https://arxiv.org/pdf/1412.6806.pdf
"""
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(epochs):
        batch_i = 1  # sanity check: train on CIFAR-10 batch 1 only
        for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
            train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
        # Report loss/accuracy on the last mini-batch plus validation accuracy.
        print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
        print_stats(sess, batch_features, batch_labels, cost, accuracy)
"""
Explanation: Train on a Single CIFAR-10 Batch
Instead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch.
End of explanation
"""
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(epochs):
        # Loop over all batches
        n_batches = 5  # CIFAR-10 ships five training batches
        for batch_i in range(1, n_batches + 1):
            for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
                train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
            # Report after each CIFAR batch within the epoch.
            print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)
    # Save Model
    saver = tf.train.Saver()
    save_path = saver.save(sess, save_model_path)
"""
Explanation: Fully Train the Model
Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.
End of explanation
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
    # Reuse the batch_size from an earlier training run in this session...
    if batch_size:
        pass
except NameError:
    # ...otherwise (fresh kernel restarted at the checkpoint) use the default.
    batch_size = 64
save_model_path = './image_classification'
n_samples = 4            # number of random test samples to display
top_n_predictions = 3    # show the top-3 softmax predictions per sample
def test_model():
    """
    Test the saved model against the test dataset.

    Restores the graph saved under ``save_model_path``, evaluates accuracy
    batch-by-batch (to bound memory use), then displays softmax top-k
    predictions for a few random samples.
    """
    # NOTE(review): despite the filename, this pickle is expected to hold the
    # *test* split written by helper.preprocess_and_save_data -- confirm the
    # helper really stores the test data under 'preprocess_training.p'.
    test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Load model
        loader = tf.train.import_meta_graph(save_model_path + '.meta')
        loader.restore(sess, save_model_path)
        # Get Tensors from loaded model (names set when the graph was built)
        loaded_x = loaded_graph.get_tensor_by_name('x:0')
        loaded_y = loaded_graph.get_tensor_by_name('y:0')
        loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
        loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
        loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
        # Get accuracy in batches for memory limitations
        test_batch_acc_total = 0
        test_batch_count = 0
        for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
            # Dropout disabled (keep_prob = 1.0) during evaluation.
            test_batch_acc_total += sess.run(
                loaded_acc,
                feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
            test_batch_count += 1
        print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
        # Print Random Samples
        random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
        random_test_predictions = sess.run(
            tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
            feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
        helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
"""
Explanation: Checkpoint
The model has been saved to disk.
Test Model
Test your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters.
End of explanation
"""
|
xclxxl414/rqalpha | docs/source/notebooks/run-rqalpha-in-ipython.ipynb | apache-2.0 | %load_ext rqalpha
"""
Explanation: IPython 与 RQAlpha
加载 RQAlpha magic
End of explanation
"""
%%rqalpha -h
""
"""
Explanation: 查看 RQAlpha magic 帮助
我们可以通过 %%rqalpha 直接在 cell 中运行回测代码。 %%rqalpha 后面的参数等价于在 CLI 中后面的 rqalpha run 的参数
End of explanation
"""
%%rqalpha -s 20100101 -e 20170505 -p -bm 000001.XSHG --account stock 100000
# Momentum-rotation strategy: each bar, hold whichever of the two equity
# indices rose more over the last 20 days; if both fell, switch entirely to
# the third, defensive index.
def init(context):
    # HS300 index, ZZ500 index and 000012.XSHG (presumably a bond index --
    # confirm the instrument) as the defensive fallback.
    context.stocks = ['000300.XSHG', '000905.XSHG', '000012.XSHG']
def handle_bar(context, bar_dict):
    [hs, zz, gz] = context.stocks
    # 20-day close history for the two equity indices.
    hs_history20 = history_bars(hs, 20, '1d', 'close')
    zz_history20 = history_bars(zz, 20, '1d', 'close')
    hsIncrease = hs_history20[-1] - hs_history20[0]
    zzIncrease = zz_history20[-1] - zz_history20[0]
    positions = context.portfolio.positions
    [hsQuality, zzQuality, gzQuality] = [positions[hs].quantity, positions[zz].quantity, positions[gz].quantity]
    if hsIncrease < 0 and zzIncrease < 0:
        # Both equity indices fell over the window: rotate fully into gz.
        if hsQuality > 0: order_target_percent(hs, 0)
        if zzQuality > 0: order_target_percent(zz, 0)
        order_target_percent(gz, 1)
    elif hsIncrease < zzIncrease:
        # ZZ500 outperformed: rotate fully into it.
        if hsQuality > 0: order_target_percent(hs, 0)
        if gzQuality > 0: order_target_percent(gz, 0)
        order_target_percent(zz, 1)
    else:
        # HS300 outperformed (or tied): rotate fully into it.
        if zzQuality > 0: order_target_percent(zz, 0)
        if gzQuality > 0: order_target_percent(gz, 0)
        order_target_percent(hs, 1)
    #logger.info("positions hs300: " + str(hsQuality) + ", zz500: " + str(zzQuality) + ", gz: " + str(gzQuality))
"""
Explanation: 使用 %%rqalpha 进行回测
End of explanation
"""
# After a %%rqalpha cell finishes, the magic injects ``results`` (per-mod
# output) and ``report`` (the sys_analyser summary) into the namespace.
results.keys()
report.keys()
report.trades[:5]
report.portfolio[:5]
report.stock_positions[:5]
"""
Explanation: 获取回测报告
运行完回测后,报告会自动存储到 report 变量中。可以直接通过 report 变量获取当次回测的结果。
另外 rqalpha 的 mod 的输出会自动存储在 results 变量中。
End of explanation
"""
# Backtest configuration for rqalpha.run_func, equivalent to the magic
# arguments used in the cell above.
config = {
    "base": {
        "start_date": "2010-01-01",
        "end_date": "2017-05-05",
        "benchmark": "000001.XSHG",   # benchmark index for the report
        "accounts": {
            "stock": 100000           # initial stock-account cash
        }
    },
    "extra": {
        "log_level": "info",
    },
    "mod": {
        "sys_analyser": {
            "enabled": True,
            "plot": True,             # draw the result chart after the run
        },
    }
}
from rqalpha.api import *
from rqalpha import run_func
# Same momentum-rotation strategy as the magic cell above, run through the
# run_func API instead.
def init(context):
    # Two equity indices plus 000012.XSHG as the defensive fallback
    # (presumably a bond index -- confirm the instrument).
    context.stocks = ['000300.XSHG', '000905.XSHG', '000012.XSHG']
def handle_bar(context, bar_dict):
    [hs, zz, gz] = context.stocks
    # 20-day close history for the two equity indices.
    hs_history20 = history_bars(hs, 20, '1d', 'close')
    zz_history20 = history_bars(zz, 20, '1d', 'close')
    hsIncrease = hs_history20[-1] - hs_history20[0]
    zzIncrease = zz_history20[-1] - zz_history20[0]
    positions = context.portfolio.positions
    [hsQuality, zzQuality, gzQuality] = [positions[hs].quantity, positions[zz].quantity, positions[gz].quantity]
    if hsIncrease < 0 and zzIncrease < 0:
        # Both equity indices fell: rotate fully into gz.
        if hsQuality > 0: order_target_percent(hs, 0)
        if zzQuality > 0: order_target_percent(zz, 0)
        order_target_percent(gz, 1)
    elif hsIncrease < zzIncrease:
        # ZZ500 outperformed: rotate fully into it.
        if hsQuality > 0: order_target_percent(hs, 0)
        if gzQuality > 0: order_target_percent(gz, 0)
        order_target_percent(zz, 1)
    else:
        # HS300 outperformed (or tied): rotate fully into it.
        if zzQuality > 0: order_target_percent(zz, 0)
        if gzQuality > 0: order_target_percent(gz, 0)
        order_target_percent(hs, 1)
# Run the backtest and pull the analyser report out of the per-mod results.
results = run_func(init=init, handle_bar=handle_bar, config=config)
report = results["sys_analyser"]
report["trades"][:5]
"""
Explanation: 使用 run_func 运行回测
End of explanation
"""
|
Jesusomar97/Simulacion2017 | Modulo1/Clase8_MembranaCircular.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
from scipy import special
import numpy as np
from ipywidgets import *
r = np.linspace(0, 10, 100)
# Plot the first five Bessel functions of the first kind on [0, 10].
for order in range(5):
    plt.plot(r, special.jn(order, r), label='$J_{{{}}}(r)$'.format(order))
plt.xlabel('$r$', fontsize=18)
plt.ylabel('$J_{n}(r)$', fontsize=18)
plt.axhline(y=0, color='k')  # horizontal axis makes the zeros visible
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), prop={'size': 14})
plt.show()
"""
Explanation: ¿Cómo vibra un tambor cuando lo golpeas?
Analizar el problema de la membrana vibrante permite entender el funcionamiento de instrumentos de percusión tales como los tambores, timbales e incluso sistemas biológicos como el tímpano.
Referencias:
- https://en.wikipedia.org/wiki/Bessel_function
- https://es.wikipedia.org/wiki/Vibraciones_de_una_membrana_circular
- https://docs.scipy.org/doc/scipy-0.18.1/reference/tutorial/special.html
Considere un tambor (membrana) de radio $a$, entonces la función de onda en $\mathbb{R}^2$ para este sistema se puede escribir como,
$$ \frac{1}{v}\frac{\partial^2 u}{\partial t^2} = \frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} $$
donde $u\equiv u(x,y,t)$ es el desplazamiento transversal_(elevación)_ y $v$ es la rapidez de propagación de la onda.
La forma habitual de encontrar la solución a esta ecuación es primero hacer un cambio de coordenadas, de cartesianas a polares y posteriormente considerar el método de separación de variable, tal que
$$ u(r, \theta, t) = R(r) \Theta(\theta) T (t)$$
Esta sustitución da como resultado tres ecuaciones diferenciales, una para cada variable de separación. Y cuyas soluciones nos ayudan a escribir los modos normales.
$$u_{nk}(r,\theta, t) = J_{n}(\lambda_{nk} r)(a_{nk}\cos{n\theta} + b_{nk}\sin{n\theta})\cos{(v\lambda_{nk}t)}$$
$$u^{}{nk}(r,\theta, t) = J{n}(\lambda_{nk}r)(a^{}{nk}\cos{n\theta} + b^{*}{nk}\sin{n\theta})\sin{(v\lambda_{nk}t)})$$
para $n = 0,1,2,\dots$, $k = 1,2,3,\dots$, donde $J_{n}$ es la función de Bessel de orden $n$ de primera clase. Además,
$$\lambda_{nk} = \frac{\alpha_{nk}}{a}$$
donde $\alpha_{nk}$ es el k-ésimo cero de $J_{n}(\lambda a)=0$. Esto es consecuencia de que $u$ sea cero en la frontera de la membrana, $r = a$.
Los coeficientes $a_{nk} , b_{nk}, a^{}_{nk}$ y $b^{}_{nk}$ se determinan de tal forma que cumplan con las condiciones iniciales:
$$u(r,\theta, 0) = f(r,\theta)$$
$$u_{t}(r,\theta, 0) = g(r,\theta)$$
donde el primer termino es la geometría inicial y el segundo la rapidez inicial. Se puede demostrar que las expresiones para estos coeficientes se pueden escribir como:
\begin{align}
a_{0k} &= \frac{1}{\pi a^2 J_{1}^{2}(\alpha_{0k})}\int_{0}^{2\pi}\int_{0}^{a}\; f(r,\theta)\, J_{0}(\lambda_{0k}r)\, r \, dr \, d\theta\
a_{nk} &= \frac{2}{\pi a^2 J_{n+1}^{2}(\alpha_{nk})}\int_{0}^{2\pi}\int_{0}^{a}\; f(r,\theta)\, J_{n}(\lambda_{nk}r)\cos(n\theta)\, r \, dr \, d\theta\
b_{nk} &= \frac{2}{\pi a^2 J_{n+1}^{2}(\alpha_{nk})}\int_{0}^{2\pi}\int_{0}^{a}\; f(r,\theta)\, J_{n}(\lambda_{nk}r)\sin(n\theta)\, r \, dr \, d\theta
\end{align}
Y similarmente,
\begin{align}
a^{}{0k} &= \frac{1}{\pi \,v\, \alpha{0k}\,a J_{1}^{2}(\alpha_{0k})}\int_{0}^{2\pi}\int_{0}^{a}\; g(r,\theta)\, J_{0}(\lambda_{0k}r)\, r \, dr \, d\theta\
a^{}{nk} &= \frac{2}{\pi\, v\,\alpha{0k}\, a J_{n+1}^{2}(\alpha_{nk})}\int_{0}^{2\pi}\int_{0}^{a}\; g(r,\theta)\, J_{n}(\lambda_{nk}r)\cos(n\theta)\, r \, dr \, d\theta\
b^{*}{nk} &= \frac{2}{\pi\, v\,\alpha{0k}\, a J_{n+1}^{2}(\alpha_{nk})}\int_{0}^{2\pi}\int_{0}^{a}\; g(r,\theta)\, J_{n}(\lambda_{nk}r)\sin(n\theta)\, r \, dr \, d\theta
\end{align}
Estamos familiarizados con la función coseno, pero no tanto con la función de Bessel. Entonces, nuestra primera actividad será conocer su comportamiento.
\begin{align}
u(r,\theta, t) &= \sum_{n=0}^{\infty}\sum_{k = 1}^{\infty}J_{n}(\lambda_{nk} r)(a_{nk}\cos{n\theta} + b_{nk}\sin{n\theta})\cos{(v\lambda_{nk}t)}\
&+ \sum_{n=0}^{\infty}\sum_{k = 1}^{\infty}J_{n}(\lambda_{nk}r)(a^{}_{nk}\cos{n\theta} + b^{}{nk}\sin{n\theta})\sin{(v\lambda{nk}t)})
\end{align}
End of explanation
"""
def f_shape(r):
    # Initial membrane displacement f(r) = 1 - r**4: a radially symmetric
    # dome that vanishes on the boundary r = 1.
    fourth_power = r ** 4
    return 1 - fourth_power
r = np.r_[0:1:100j]
angle = np.r_[0:2*np.pi:200j]
r_shape = f_shape(r)
# Displacement field u(r, theta): constant along each circle of radius r
# because the initial shape is radially symmetric.
u = np.array([np.full(len(angle), radi) for radi in r_shape])
# Polar -> Cartesian grid for plotting.
x = np.array([var_r * np.cos(angle) for var_r in r])
y = np.array([var_r * np.sin(angle) for var_r in r])
plt.figure(figsize=(6, 5))
# Bug fix: the original passed the undefined name ``u_good`` (NameError);
# the computed displacement array is ``u``.
plt.pcolor(x, y, u, cmap='viridis')
plt.axis('off')
plt.colorbar()
plt.show()
"""
Explanation: Por simplicidad vamos a suponer que $r = 1$ y determinar los ceros, significa encontrar todas las intersecciones de las curvas anteriores con el eje horizontal.
Ejemplo: Caso radialmente simétrico
Suponga que $a = 1$, $v = 1$ y que las condiciones iniciales son:
$$ f(r,\theta) = 1- r^4\quad\quad g(r,\theta) = 0$$
Dado que la rapidez inicial es cero, entonces $a^{}_{nk} = b^{}_{nk} = 0$
End of explanation
"""
from sympy import init_printing; init_printing(use_latex='mathjax')
import sympy as sym
# Symbolic variables: radius r, angle theta and mode index k.
r, theta, k = sym.symbols('r theta k')
r, theta, k
# Bessel order n: a positive integer in the mode expansion.
n = sym.Symbol('n', positive = True, integer=True)
n
def lamb(n,k):
    """Return the symbol lambda_nk (k-th zero of J_n scaled by the radius a = 1)."""
    return sym.Symbol('lambda_%s%s'%(n,k), positive = True)
lamb(0,k)
# Initial shape f(r) = 1 - r**4 (radially symmetric, zero at r = 1).
f = 1 - r**4; f
# Integrand of a_nk: f * J_n(lambda_nk r) * cos(n theta) * r (Jacobian r included).
integrand = f * sym.besselj(n, lamb(n,k) * r) * sym.cos(n *theta) * r
integrand
ank = sym.Integral(integrand, (r, 0, 1), (theta, 0, 2*sym.pi))
ank
solution = ank.doit()  # the theta integral vanishes for every n > 0
solution
"""
Explanation: Y la solución para el desplazamiento en el tiempo es simplemente,
\begin{equation}
u(r,\theta, t) = \sum_{n=0}^{\infty}\sum_{k = 1}^{\infty}J_{n}(\lambda_{nk} r)(a_{nk}\cos{n\theta} + b_{nk}\sin{n\theta})\cos{(v\lambda_{nk}t)}
\end{equation}
Entonces, solo será necesario encontrar $a_{nk}$ y $b_{nk}$.
\begin{align}
a_{0k} &= \frac{1}{\pi a^2 J_{1}^{2}(\alpha_{0k})}\int_{0}^{2\pi}\int_{0}^{a}\; f(r,\theta)\, J_{0}(\lambda_{0k}r)\, r \, dr \, d\theta\
a_{nk} &= \frac{2}{\pi a^2 J_{n+1}^{2}(\alpha_{nk})}\int_{0}^{2\pi}\int_{0}^{a}\; f(r,\theta)\, J_{n}(\lambda_{nk}r)\cos(n\theta)\, r \, dr \, d\theta\
b_{nk} &= \frac{2}{\pi a^2 J_{n+1}^{2}(\alpha_{nk})}\int_{0}^{2\pi}\int_{0}^{a}\; f(r,\theta)\, J_{n}(\lambda_{nk}r)\sin(n\theta)\, r \, dr \, d\theta
\end{align}
Para resolver estas integrales haremos uso de sympy. Iniciemos con $a_{nk}$.
Primer caso $n>0$
End of explanation
"""
# Specialise the a_nk integrand to a given order; for n = 0 the cosine
# factor is 1 and the theta integral contributes 2*pi.
integ = lambda n: f * sym.besselj(n, lamb(n,k) * r) * sym.cos(n*theta) * r
integ(0)
# a_0k integral over the unit disc.
a0k = sym.Integral(integ(0), (r, 0, 1), (theta, 0, 2*sym.pi))
a0k
a0k_solution = a0k.doit()
a0k_solution
# Apply the Fourier-Bessel normalisation 1 / (pi * J_1(lambda_0k)**2),
# with radius a = 1.
a0k_sol = 1/(sym.pi*sym.besselj(1, lamb(0,k))**2)*a0k_solution
a0k_sol
sym.simplify(a0k_sol)
"""
Explanation: Entonces para cualquier $n>0$ no se tiene contribución.
Segundo caso $n=0$
End of explanation
"""
# Integrand of b_nk: same as a_nk but with sin(n theta).
integrand_b = f * sym.besselj(n, lamb(n,k) * r) * sym.sin(n *theta) * r
integrand_b
bnk = sym.Integral(integrand_b, (r, 0, 1), (theta, 0, 2*sym.pi))
bnk
# The theta integral of sin(n theta) over a full period vanishes for n > 0.
solution_b = bnk.doit()
solution_b
"""
Explanation: Y para $b_{nk}$
End of explanation
"""
# For n = 0 the sin(0 * theta) factor makes the whole integrand zero,
# so b_0k contributes nothing either.
integ_b = lambda n: f * sym.besselj(n, lamb(n,k) * r) * sym.sin(n*theta) * r
integ_b(0)
"""
Explanation: ¿Qué sucede para $n = 0$?
End of explanation
"""
# Redisplay a_0k: the only nonzero coefficients of the whole series.
a0k_sol
"""
Explanation: Ahhh! el integrando es cero, entonces este termino también es cero.
\begin{equation}
u(r,\theta, t) = \sum_{k = 1}^{\infty} a_{0k}J_{0}(\lambda_{0k} r)\cos{(v\lambda_{0k}t)}
\end{equation}
End of explanation
"""
def a0k_sym(lambd):
    """Numeric Fourier-Bessel coefficient a_{0k} for f(r) = 1 - r**4.

    `lambd` is the k-th positive zero of J_0 (or any positive value);
    this evaluates the closed form obtained symbolically above.
    """
    j0 = special.jn(0, lambd)
    j1 = special.jn(1, lambd)
    numerador = (-8.0 * j0 / lambd**2
                 + 32.0 * j1 / lambd**3
                 + 64.0 * j0 / lambd**4
                 - 128.0 * j1 / lambd**5)
    return numerador / j1**2
def tambor(v, kth_zero, nt, t):
    """Radially symmetric membrane displacement for a single mode.

    Evaluates u(r, t) = a_{0k} J_0(lambda r) cos(lambda v t) on a polar
    grid, where lambda is the (kth_zero+1)-th of `nt` precomputed zeros
    of J_0.  Returns the Cartesian grid (x, y) and the displacement u.
    """
    radios = np.linspace(0.0, 1.0, 100)
    angulos = np.linspace(0.0, 2.0 * np.pi, 200)
    lambd = special.jn_zeros(0, nt)[kth_zero]
    # Radial profile; it is constant along the angular direction.
    perfil = a0k_sym(lambd) * special.jn(0, lambd * radios) * np.cos(lambd * v * t)
    u = np.repeat(perfil[:, None], len(angulos), axis=1)
    x = np.outer(radios, np.cos(angulos))
    y = np.outer(radios, np.sin(angulos))
    return x, y, u
# Snapshot of the k = 0 radially symmetric mode at t = 7 (v = 1, using
# the first 15 precomputed zeros inside tambor).
x1, y1, u1 = tambor(1, 0, 15, 7)
plt.figure(figsize = (6, 5))
plt.pcolor(x1 , y1 , u1, cmap = 'viridis')
plt.axis('off')
plt.colorbar()
plt.show()
def tambor_nk(t = 0, kth=0):
    """Plot the radially symmetric mode `kth` at time `t`.

    Relies on the notebook-global helper tambor(v, kth_zero, nt, t) and
    on matplotlib's pyplot (`plt`).
    """
    fig = plt.figure(figsize = (6,5))
    ax = fig.add_subplot(1, 1, 1)
    x, y, u = tambor(1, kth, 50, t)
    im = ax.pcolor(x, y, u, cmap = 'viridis')
    ax.set_xlim(xmin = -1.1, xmax = 1.1)
    ax.set_ylim(ymin = -1.1, ymax = 1.1)
    plt.colorbar(im)
    plt.axis('off')
    fig.canvas.draw()

# BUG FIX: the original also passed n = (0, 10, 1), but tambor_nk has no
# parameter named `n`, so ipywidgets' interact_manual rejected the call.
interact_manual(tambor_nk, t = (0, 15,.01), kth = (0, 10, 1));
def tambor_nk(t = 0, kth=0):
    """Plot mode `kth` at time `t` with a fixed colour scale.

    The fixed vmin/vmax keep the colour mapping constant while the time
    slider moves, so successive frames are directly comparable.
    """
    fig = plt.figure(figsize = (6,5))
    ax = fig.add_subplot(1, 1, 1)
    x, y, u = tambor(1, kth, 50, t)
    im = ax.pcolor(x, y, u, cmap = 'viridis', vmax = 1.2, vmin = -1.2)
    ax.set_xlim(xmin = -1.1, xmax = 1.1)
    ax.set_ylim(ymin = -1.1, ymax = 1.1)
    plt.colorbar(im)
    plt.axis('off')
    fig.canvas.draw()

# BUG FIX: dropped the n = (0, 10, 1) argument -- tambor_nk takes no
# parameter named `n`, so ipywidgets' interact rejected the original call.
interact(tambor_nk, t = (0, 15,.01), kth = (0, 10, 1));
"""
Explanation: Primero vamos a programar para algún modo $k$.
End of explanation
"""
def tambor_n_allk(v, nk_zeros, t):
    """Full radially symmetric solution: sum of the first `nk_zeros`
    J_0 modes, each weighted by its Fourier-Bessel coefficient a0k_sym.

    Returns the Cartesian grid (x, y) and the summed displacement.
    """
    radios = np.linspace(0.0, 1.0, 100)
    angulos = np.linspace(0.0, 2.0 * np.pi, 200)
    raices = special.jn_zeros(0, nk_zeros)

    def capa(lam):
        # One weighted mode, replicated along the angular direction.
        perfil = a0k_sym(lam) * special.jn(0, lam * radios) * np.cos(lam * v * t)
        return np.repeat(perfil[:, None], len(angulos), axis=1)

    u_total = capa(raices[0])
    for raiz in raices[1:]:
        u_total += capa(raiz)
    x = np.outer(radios, np.cos(angulos))
    y = np.outer(radios, np.sin(angulos))
    return x, y, u_total
def tambor_0(t = 0):
    """Plot the full radially symmetric solution (sum over k) at time t."""
    fig = plt.figure(figsize = (6,5))
    ax = fig.add_subplot(1, 1, 1)
    # Sum of the first 15 radial modes with wave speed v = 1.
    x, y, u = tambor_n_allk(1, 15, t)
    im = ax.pcolor(x, y, u, cmap = 'viridis')
    ax.set_xlim(xmin = -1.1, xmax = 1.1)
    ax.set_ylim(ymin = -1.1, ymax = 1.1)
    plt.colorbar(im)
    plt.axis('off')
    fig.canvas.draw()
# Manual-refresh widget: the 15-mode sum is costly, so re-render on demand.
interact_manual(tambor_0, t = (0, 15,.01));
"""
Explanation: Y ahora, la solución completa.
End of explanation
"""
def tambor(n, r_max, v, kth_zero, nt, t):
    """Single normal mode (n, k) of a circular membrane.

    Evaluates u_{nk}(r, theta, t) = J_n(lambda r) cos(n theta) cos(lambda v t)
    on a polar grid of radius r_max, where lambda is the (kth_zero+1)-th of
    `nt` positive zeros of J_n.  Returns the Cartesian grid (x, y) and u.
    """
    r = np.r_[0:r_max:100j]
    angle = np.r_[0:2*np.pi:200j]
    # BUG FIX: the eigenvalues of order-n modes are zeros of J_n, not J_0.
    # With J_0 zeros the displacement did not vanish at the rim for n > 0,
    # violating the fixed-boundary condition stated in the text above.
    ceros = special.jn_zeros(n, nt)
    lamb = ceros[kth_zero]
    u = np.array([special.jn(n, lamb * var_r) * np.cos(n * angle)
                  * np.cos(lamb * v * t) for var_r in r])
    x = np.array([var_r * np.cos(angle) for var_r in r])
    y = np.array([var_r * np.sin(angle) for var_r in r])
    return x, y, u
"""
Explanation: Fíjese bien: la condición inicial en $t = 0$ se cumple para la solución encontrada.
Tarea
Problema 1. Suponga que $a = 1$, $v = 1$ y que las condiciones iniciales son:
$$ f(r,\theta) = (1- r^4)\cos(\theta)\quad\quad g(r,\theta) = 0$$
Problema 2. Suponga que $a = 2$, $v = 1$ y que las condiciones iniciales son:
$$ f(r,\theta) = 0 \quad\quad g(r,\theta) = r^2(2- r)\sin^8\left(\frac{\theta}{2}\right)$$
Fin Modulo 1
Versión antigua (Lo único que tal vez sirva de algo, son los programas)
Tal vez sirvan de algo para sus tareas.
Entonces, primero veamos algunos modos normales del sistema. Por ejemplo(demasiado simplificado),
$$u(r,\theta, t)_{nk} = J_{n}(\lambda_{nk} r)\,\cos(n\theta)\,\cos(\lambda_{nk} v t)$$
La siguiente función se aplica caso simplificado.
End of explanation
"""
# Mode (n, k) = (1, 1) at t = 0: order n = 1, first zero (index 0),
# radius 1, wave speed 1, 15 precomputed zeros.
x, y, u = tambor(1, 1, 1, 0, 15, 0)
plt.figure(figsize = (6, 5))
plt.pcolor(x, y, u, cmap = 'viridis')
plt.axis('off')
plt.colorbar()
plt.show()
"""
Explanation: Entonces, por ejemplo si $n = 1$, $a = 1$, $v = 1$, $k = 1$ y $t= 0$. Este sería el modo de vibración $(n,k)\rightarrow (1,1)$.
End of explanation
"""
def tambor_nk(t = 0, n = 0, kth=0):
    """Plot the single membrane mode (n, kth) at time t.

    Uses the notebook-global tambor(n, r_max, v, kth_zero, nt, t) helper.
    """
    fig = plt.figure(figsize = (6,5))
    ax = fig.add_subplot(1, 1, 1)
    x, y, u = tambor(n, 1, 1, kth, 15, t)
    im = ax.pcolor(x, y, u, cmap = 'viridis')
    ax.set_xlim(xmin = -1.1, xmax = 1.1)
    ax.set_ylim(ymin = -1.1, ymax = 1.1)
    plt.colorbar(im)
    plt.axis('off')
    fig.canvas.draw()
# Manual-refresh sliders for time t, angular order n and radial index kth.
interact_manual(tambor_nk, t = (0, 15,.01), n = (0, 10, 1), kth = (0, 10, 1));
"""
Explanation: Ahora, veamos cómo lucen todos los demás modos de vibración $(n,k)$.
End of explanation
"""
def tambor_n_allk(n, r_max, v, nk_zeros, t):
    """Superpose the first `nk_zeros` radial modes of angular order n
    (unit amplitudes) on a polar grid of radius r_max.

    Returns the Cartesian grid (x, y) and the summed displacement u0.
    """
    r = np.r_[0:r_max:100j]
    angle = np.r_[0:2*np.pi:200j]
    # BUG FIX: use the zeros of J_n (order n) instead of J_0 so that every
    # summed mode satisfies the fixed-rim boundary condition for n > 0.
    ceros = special.jn_zeros(n, nk_zeros)
    lamb = ceros[0]
    u0 = np.array([special.jn(n, lamb * var_r) * np.cos(n * angle)
                   * np.cos(lamb * v * t) for var_r in r])
    for cero in range(1, nk_zeros):
        lamb = ceros[cero]
        u = np.array([special.jn(n, lamb * var_r) * np.cos(n * angle)
                      * np.cos(lamb * v * t) for var_r in r])
        u0 += u
    x = np.array([var_r * np.cos(angle) for var_r in r])
    y = np.array([var_r * np.sin(angle) for var_r in r])
    return x, y, u0
def tambor_n(t = 0, n = 0):
    """Plot the sum of the first 15 radial modes of angular order n at time t."""
    fig = plt.figure(figsize = (6,5))
    ax = fig.add_subplot(1, 1, 1)
    x, y, u = tambor_n_allk(n, 1, 1, 15, t)
    im = ax.pcolor(x, y, u, cmap = 'viridis')
    ax.set_xlim(xmin = -1.1, xmax = 1.1)
    ax.set_ylim(ymin = -1.1, ymax = 1.1)
    plt.colorbar(im)
    plt.axis('off')
    fig.canvas.draw()
# Manual-refresh sliders for time and angular order n.
interact_manual(tambor_n, t = (0, 15,.01), n = (0, 10, 1));
"""
Explanation: Ahora, tal vez nos interesaría conocer el comportamiento de la membrana cuando sumamos sobre un conjunto de modos $k$. Es decir,
$$u(r,\theta, t)_{n} = \sum_{k = 1}u(r,\theta, t)_{nk} = \sum_{k = 1}J_{n}(\lambda_{nk} r)\,\cos(n\theta)\,\cos(\lambda_{nk} v t) $$
La manera usual de hacer esto es considerar la suma en series de Fourier, es decir a esta suma le falta un coeficiente $A_{nk}$, pero por simplicidad aquí no vamos a considerar este término.
Una posible función para realizar esto sería,
End of explanation
"""
def order_n(n, ceros, nk_zeros, angle, v, r, t):
    """Sum of the first `nk_zeros` modes of angular order n (unit amplitudes).

    `ceros` supplies the eigenvalues to use; the result has shape
    (len(r), len(angle)).
    """
    def modo(lam):
        # Single mode J_n(lam r) cos(n theta) cos(lam v t) on the polar grid.
        return np.array([special.jn(n, lam * radio) * np.cos(n * angle)
                         * np.cos(lam * v * t) for radio in r])

    total = modo(ceros[0])
    for idx in range(1, nk_zeros):
        total += modo(ceros[idx])
    return total
def tambor(orden_n, r_max, v, nk_zeros, t):
    """Superpose angular orders n = 0 .. orden_n-1, each with its first
    `nk_zeros` radial modes (unit amplitudes), at time t.

    Returns the Cartesian grid (x, y) and the total displacement u0.
    """
    r = np.r_[0:r_max:100j]
    angle = np.r_[0:2*np.pi:100j]
    # BUG FIX: each angular order n has its own eigenvalues (zeros of J_n);
    # the original computed the zeros of J_0 once and reused them for every
    # order, so modes with n > 0 used the wrong frequencies and did not
    # vanish at the rim.
    u0 = order_n(0, special.jn_zeros(0, nk_zeros), nk_zeros, angle, v, r, t)
    for n in range(1, orden_n):
        u = order_n(n, special.jn_zeros(n, nk_zeros), nk_zeros, angle, v, r, t)
        u0 += u
    x = np.array([var_r * np.cos(angle) for var_r in r])
    y = np.array([var_r * np.sin(angle) for var_r in r])
    return x, y, u0
# Superposition of orders n = 0..9, five radial modes each, at t = 5.
x, y, u = tambor(10, 1, 1, 5, 5)
plt.figure(figsize = (5, 5))
plt.pcolor(x, y, u, cmap = 'inferno')
plt.axis('on')
plt.show()
"""
Explanation: Por último, nos queda el caso cuando sumamos sobre todos los modos $n$. Es decir,
End of explanation
"""
|
rsterbentz/phys202-2015-work | assignments/assignment08/InterpolationEx01.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.interpolate import interp1d, interp2d
"""
Explanation: Interpolation Exercise 1
End of explanation
"""
# Load the sampled 2-D trajectory: three aligned 1-D arrays of 40 samples.
f = np.load('trajectory.npz')
x = f['x']  # x position at each sample time, x[i] = x(t[i])
y = f['y']  # y position at each sample time, y[i] = y(t[i])
t = f['t']  # sample times
assert isinstance(x, np.ndarray) and len(x)==40
assert isinstance(y, np.ndarray) and len(y)==40
assert isinstance(t, np.ndarray) and len(t)==40
"""
Explanation: 2D trajectory interpolation
The file trajectory.npz contains 3 Numpy arrays that describe a 2d trajectory of a particle as a function of time:
t which has discrete values of time t[i].
x which has values of the x position at those times: x[i] = x(t[i]).
y which has values of the y position at those times: y[i] = y(t[i]).
Load those arrays into this notebook and save them as variables x, y and t:
End of explanation
"""
# Cubic 1-D interpolants of each coordinate as a function of time.
x_approx = interp1d(t, x, kind='cubic')
y_approx = interp1d(t, y, kind='cubic')
# (Removed an unused interp2d(x, y, t) fit: the trajectory is a pair of
# 1-D functions of time, not a field z(x, y), and interp2d on scattered
# data is deprecated in SciPy.)
# 200 evenly spaced times spanning the sampled interval, endpoints included.
newt = np.linspace(t.min(), t.max(), 200)
newx = x_approx(newt)
newy = y_approx(newt)
assert newt[0]==t.min()
assert newt[-1]==t.max()
assert len(newt)==200
assert len(newx)==200
assert len(newy)==200
"""
Explanation: Use these arrays to create interpolated functions $x(t)$ and $y(t)$. Then use those functions to create the following arrays:
newt which has 200 points between ${t_{min},t_{max}}$.
newx which has the interpolated values of $x(t)$ at those times.
newy which has the interpolated values of $y(t)$ at those times.
End of explanation
"""
# Parametric plot of the trajectory: interpolated curve (line with dots)
# drawn over the original samples (red circles, no line).
fig = plt.figure(figsize=(7,7))
plt.plot(newx, newy, marker='.')
plt.plot(x, y, 'ro')
plt.xticks([-1.0,-0.5,0.0,0.5,1.0])
plt.yticks([-1.0,-0.5,0.0,0.5,1.0])
plt.xlabel('x(t)')
plt.ylabel('y(t)')
assert True # leave this to grade the trajectory plot
"""
Explanation: Make a parametric plot of ${x(t),y(t)}$ that shows the interpolated values and the original points:
For the interpolated points, use a solid line.
For the original points, use circles of a different color and no line.
Customize you plot to make it effective and beautiful.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/cmcc/cmip6/models/sandbox-3/atmos.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cmcc', 'sandbox-3', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: CMCC
Source ID: SANDBOX-3
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:50
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection schemes name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# NOTE(review): 'flourinated' is a misspelling carried in the upstream CMIP6
# specialization key itself — keep it verbatim or the property will not resolve.
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# NOTE(review): 'flourinated' is a misspelling carried in the upstream CMIP6
# specialization key itself — keep it verbatim or the property will not resolve.
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# NOTE(review): 'physical_reprenstation' is a misspelling carried in the
# upstream CMIP6 specialization key itself — keep it verbatim so the ID
# resolves (the analogous shortwave key uses the correct spelling).
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
Solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
jon-young/medicalimage | Liver Segmentation.ipynb | mit | sliceNum = 42
dicomPath = join(expanduser('~'), 'Documents', 'SlicerDICOMDatabase', 'TCIALocal', '0', 'images', '')
reader = sitk.ImageSeriesReader()
seriesIDread = reader.GetGDCMSeriesIDs(dicomPath)[1]
dicomFilenames = reader.GetGDCMSeriesFileNames(dicomPath, seriesIDread)
reader.SetFileNames(dicomFilenames)
imgSeries = reader.Execute()
imgSlice = imgSeries[:,:,sliceNum]
"""
Explanation: Read in DICOM images
End of explanation
"""
reader.GetGDCMSeriesIDs(dicomPath)
"""
Explanation: Note that the TCGA-BC-4073 patient has 2 series of images (series 9 & 10). The series IDs are:
End of explanation
"""
liversegmentation.sitk_show(imgSlice)
"""
Explanation: By comparing images between OsiriX and plots of the SimpleITK images, the 2<sup>nd</sup> tuple element corresponds to series 9.
End of explanation
"""
imgSliceUInt8 = sitk.Cast(sitk.RescaleIntensity(imgSlice), sitk.sitkUInt8)
"""
Explanation: Cast original slice to unsigned 8-bit integer so that segmentations can be overlaid on top
End of explanation
"""
# Parameters for curvature anisotropic diffusion; exact meaning (e.g. time step,
# conductance, iterations) is defined by liversegmentation.anisotropic_diffusion
# — TODO confirm the ordering against that helper.
anisoParams = (0.06, 9.0, 5)
imgFilter = liversegmentation.anisotropic_diffusion(imgSlice, *anisoParams)
liversegmentation.sitk_show(imgFilter)
"""
Explanation: Filtering
Curvature anisotropic diffusion
End of explanation
"""
# Median smoothing (radius-3 neighborhood); overwrites the diffusion result in
# imgFilter so the downstream gradient step uses this output instead.
medianFilter = sitk.MedianImageFilter()
medianFilter.SetRadius(3)
imgFilter = medianFilter.Execute(imgSlice)
liversegmentation.sitk_show(imgFilter)
"""
Explanation: Median filter
End of explanation
"""
#sigma = 3.0
# Gaussian smoothing scale for the gradient-magnitude edge image (smaller sigma
# keeps finer edges); 3.0 was a previous attempt, kept for reference.
sigma = 1.0
imgGauss = liversegmentation.gradient_magnitude(imgFilter, sigma)
liversegmentation.sitk_show(imgGauss)
"""
Explanation: Edge potential
Gradient magnitude recursive Gaussian
End of explanation
"""
#K1, K2 = 20.0, 6.0
#K1, K2 = 14.0, 4.0
# Sigmoid mapping constants; earlier attempts kept above for reference.
# Their exact role (e.g. alpha/beta of the sigmoid) is defined by
# liversegmentation.sigmoid_filter — TODO confirm.
K1, K2 = 8.0, 2.0
imgSigmoid = liversegmentation.sigmoid_filter(imgGauss, K1, K2)
liversegmentation.sitk_show(imgSigmoid)
"""
Explanation: Feature Image
Sigmoid mapping
End of explanation
"""
# Seed points as (x, y) image coordinates; the i-th radius pairs with the i-th coordinate.
coords = [(118, 286), (135, 254), (202, 75), (169, 89), (145, 209), (142, 147), (252, 58), (205, 119)]
radii = [10, 10, 10, 10, 10, 10, 5, 5]
# Coordinates are reversed to (row, col) order — the array-index convention used
# when these keys are compared against numpy indices later in the notebook.
seed2radius = {tuple(reversed(p[0])): p[1] for p in zip(coords, radii)}
initImg = liversegmentation.input_level_set(imgSigmoid, seed2radius)
liversegmentation.sitk_show(initImg)
"""
Explanation: Input level set
Create 2 lists, one to hold the seed coordinates and the other for the radii. The radius in the 1<sup>st</sup> index corresponds to the 1<sup>st</sup> index, and so on.
End of explanation
"""
# Convert the level-set result into a 0/1 mask: values in [-2.3438, 0.0] become 1.
# NOTE(review): imgGac2 is produced by an earlier cell not shown here — verify it
# exists before re-running this cell in isolation.
binaryThresh = sitk.BinaryThresholdImageFilter()
binaryThresh.SetLowerThreshold(-2.3438)
binaryThresh.SetUpperThreshold(0.0)
binaryThresh.SetInsideValue(1)
binaryThresh.SetOutsideValue(0)
binaryImg = binaryThresh.Execute(imgGac2)
liversegmentation.sitk_show(binaryImg)
"""
Explanation: Creating new level set from segmentation of downsampled image.
First convert the segmentation result into a workable format:
End of explanation
"""
# Second round of seeds as (x, y) coordinates with one radius per seed;
# keys are reversed to (row, col) to match numpy array indexing below.
coords2 = [(235, 108), (199, 188), (120, 113), (96, 140)]
radii2 = [5, 5, 5, 5]
seed2radius2 = {tuple(reversed(p[0])): p[1] for p in zip(coords2, radii2)}
"""
Explanation: Add in new seeds:
End of explanation
"""
# Build a new level-set image: union of the previous segmentation mask and
# fresh seed disks, then map to the signed form used by the GAC filter.
X_1 = sitk.GetArrayFromImage(binaryImg)
# create a 2nd seed matrix from the 2nd set of coordinates
setupImg = sitk.Image(imgSigmoid.GetSize()[0], imgSigmoid.GetSize()[1], sitk.sitkUInt8)
X_2 = sitk.GetArrayFromImage(setupImg)
for i in range(X_2.shape[0]):
    for j in range(X_2.shape[1]):
        # Mark the pixel as soon as any seed disk covers it. any() short-circuits;
        # the original loop kept testing (and re-setting) against every remaining
        # seed even after a match.
        if any(euclidean((i, j), s) <= r for s, r in seed2radius2.items()):
            X_2[i, j] = 1
# Element-wise OR of the two boolean masks.
X = X_1.astype(bool) + X_2.astype(bool)
# Map {0, 1} -> {+0.5, -0.5}: presumably negative inside / positive outside is the
# sign convention the GAC filter expects — TODO confirm.
initImg2 = sitk.Cast(sitk.GetImageFromArray(X.astype(int)), imgSigmoid.GetPixelIDValue()) * -1 + 0.5
initImg2.SetSpacing(imgSigmoid.GetSpacing())
initImg2.SetOrigin(imgSigmoid.GetOrigin())
initImg2.SetDirection(imgSigmoid.GetDirection())
liversegmentation.sitk_show(initImg2)
"""
Explanation: Now create new level set image:
End of explanation
"""
# Third round of seeds: (x, y) coordinates plus one radius per seed.
coords3 = [(225, 177), (246, 114), (83, 229), (78, 208), (82, 183), (238, 126)]
radii3 = [5, 10, 5, 5, 5, 15]
# Reverse each coordinate to (row, col) index order used by the numpy arrays below.
seed2radius3 = {tuple(reversed(c)): r for c, r in zip(coords3, radii3)}
X_1 = sitk.GetArrayFromImage(binaryImg)
# create a 3rd seed matrix from the 3rd set of coordinates
setupImg = sitk.Image(imgSigmoid.GetSize()[0], imgSigmoid.GetSize()[1], sitk.sitkUInt8)
X_2 = sitk.GetArrayFromImage(setupImg)
for i in range(X_2.shape[0]):
    for j in range(X_2.shape[1]):
        # Mark the pixel as soon as any seed disk covers it. any() short-circuits;
        # the original loop kept testing every remaining seed after a match.
        if any(euclidean((i, j), s) <= r for s, r in seed2radius3.items()):
            X_2[i, j] = 1
# Element-wise OR of the previous segmentation and the new seed disks.
X = X_1.astype(bool) + X_2.astype(bool)
# Map {0, 1} -> {+0.5, -0.5}: presumably negative inside / positive outside is the
# sign convention the GAC filter expects — TODO confirm.
initImg3 = sitk.Cast(sitk.GetImageFromArray(X.astype(int)), imgSigmoid.GetPixelIDValue()) * -1 + 0.5
initImg3.SetSpacing(imgSigmoid.GetSpacing())
initImg3.SetOrigin(imgSigmoid.GetOrigin())
initImg3.SetDirection(imgSigmoid.GetDirection())
liversegmentation.sitk_show(initImg3)
"""
Explanation: Add in a 3<sup>rd</sup> set of seeds:
End of explanation
"""
# Geodesic active contour parameters; the two commented tuples are earlier
# attempts kept for reference. Exact meaning of each entry is defined by
# liversegmentation.geodesic_active_contour — TODO confirm.
#gacParams = (1.0, 0.2, 4.5, 0.01, 250)
#gacParams = (1.0, 0.2, 4.5, 0.01, 200)
gacParams = (1.0, 0.2, 5.0, 0.01, 350)
imgGac3 = liversegmentation.geodesic_active_contour(initImg3, imgSigmoid, *gacParams)
# BUG FIX: the original displayed the stale `imgGac` from an earlier run instead
# of the result just computed into imgGac3.
liversegmentation.sitk_show(imgGac3)
"""
Explanation: Segmentation
Geodesic Active Contour
End of explanation
"""
# Threshold the final level set into a label image and overlay it on the
# original (uint8) slice; backgroundValue=255 keeps the background transparent.
labelLowThresh = -2.3438
labelUpThresh = 0.0
binarySegImg3 = liversegmentation.binary_threshold(imgGac3, labelLowThresh, labelUpThresh)
liversegmentation.sitk_show(sitk.LabelOverlay(imgSliceUInt8, binarySegImg3, backgroundValue=255))
"""
Explanation: Display overlay of segmentation over original slice:
End of explanation
"""
|
sevo/higher_order_functions | Immutable a Higher order functions.ipynb | mit | x = 'foo'
print(id(x))
print(id(x.upper()))
print(id(x + 'bar'))
"""
Explanation: Sutaz
Project Euler
Vyriesit co najviac uloh funkcionalne
Najlepsi dostanu plny pocet bodov z Python casti zaverecnej skusky
Nemenne objekty a funkcie vyssej urovne
Nemenné (Immutable) objekty
Nemenný objekt sa po vytvorení už nemôže meniť
End of explanation
"""
x = 'foo'
y = x
print(x, id(x))
# Rebinding x does not touch the 'foo' object itself.
x = 'bar'
print(x, id(x))# the 'foo' object did not change; x merely points to a different object now
print(y, id(y))
"""
Explanation: Neznamena to, ze referencia na objekt sa nemoze menit
v cisto funkcionalnom jazyku by sa nemalo diat ani to
End of explanation
"""
# -- JAVA --
final List<Integer> list = new ArrayList<Integer>();
list = new ArrayList<Integer>(); // toto sa neskompiluje
# -- JAVA --
final List<Integer> list = new ArrayList<Integer>();
list.add(1); //toto prejde bez problemov
# -- JAVA --
final List<Integer> list = Collections.unmodifiableList(new ArrayList<Integer>(...)); //toto je immutable list
"""
Explanation: Nie je to to iste ako klucove slovo final v Jave
Final premenna po vytvoreni nemoze smerovat na iny objekt
Objekt samtny ale moze byt zmeneny
End of explanation
"""
x = 'foo'
y = x
print(x) # foo
# For immutable str, += rebinds y to a new object; x is unaffected.
y += 'bar'
print(x) # foo
print(y)
x = [1, 2, 3]
y = x
print(x)
# For mutable list, += mutates in place, so the change is visible through x too.
y += [3, 2, 1]
print(x)
"""
Explanation: Imutable znamena, ze hociaka operacia nad objektom vytvori novy objekt
End of explanation
"""
def func(val):
    # += on a str rebinds the local name only; the caller's object is untouched.
    val += 'bar'
x = 'foo'
print(x)
func(x)
print(x)
def func(val):
    # += on a list mutates the shared object, so the caller observes the change.
    val += [3, 2, 1]
x = [1, 2, 3]
print(x)
func(x)
print(x)
"""
Explanation: Pozor, v Pythone sa parametre funkcie predavaju referenciou
Pri mutable objektoch to moze sposobit necakane veci ak neviete, co sa vo funkcii deje
End of explanation
"""
a = 'text'
print(a)
print('Adresa je: {}'.format(id(a)))
# Znamena to, ze neviem menit hodnotu
a[0] = 'T'
print(a)
print('Adresa je: {}'.format(id(a)))
"""
Explanation: Ak predate immutable objekt funkcii, tak vam ho funkcia urcite nezmeni
String je imutable
Podobne ako vsetky zakladne typy
End of explanation
"""
a = [1,2,3,4,5]
print(a)
print('Adresa je: {}'.format(id(a)))
# Znamena to, ze neviem menit hodnotu
a[0] = 'T'
print(a)
print('Adresa je: {}'.format(id(a)))
"""
Explanation: List je mutable
End of explanation
"""
t1 = (1, 2, 3, 4, 5)
t1
t1[1]
t1[1]=3
"""
Explanation: Tuple je immutable
End of explanation
"""
t1 = (1, 2, 3, 4, 5)
# Ked chceme update, treba vyrobit novy objekt
t2 = t1[:2] + (17, ) + t1[3:]
t2
# alebo
l1 = list(t1)
l1[2] = 17
t2 = tuple(l1)
t2
# vs.
a = [1,2,3,4,5]
a[2] = 17
a
"""
Explanation: Nemennost moze komplikovat pracu s objektami
End of explanation
"""
# inspirovane https://www.youtube.com/watch?v=5qQQ3yzbKp8
employees = ['Jozo', 'Eva', 'Fero', 'Miro', 'Anna', 'Kristina']
output = '<ul>\n'
for employee in employees:
output += '\t<li>{}</li>\n'.format(employee)
# print('Adresa outputu je: {}'.format(id(output)))
output += '</ul>'
print(output)
"""
Explanation: Preco je nemennost dobra
Netreba pocitat s tym, ze sa vam moze objekt zmenit
Je to bezpecnejsie.
vznika menej chyb
Lahsie sa debuguje
Lahsie sa testuje
staci test na jednu funkciu a nie celu skupinu objektov
Toto je dovod, preco ma Test Driven Development (TDD) taky usepch
Testy sa píšu ešte pred kódom
Zamýšľate sa ako napísať kód tak aby bol testovateľný
Bez toho aby ste o tom vedeli odstraňujete vedľajšie efekty
Nazite sa o to, aby na sebe funkcie co najmenej zavyseli
Pripravovanie objektov je pre vas zbytocnou komplikaciou
Zmena stavu objktu sposobuje, ze musite pisat velmi vela tetsov aby ste osetrili mnozstvo hranicnych stavov
Da sa lahsie zdielat medzi vlaknami a procesmi
netreba synchronizovat pristup k objektom
Da sa hashovat
ak pouzijete objekt ako kluc, tak sa urcite nezmeni a mozete
hashovacia funkcia nad nim vzdy vrati rovnaku hodnotu
Objekty mozu byt mensie. Zaberaju menej miesta v pamati a operacie nad nimi su rychlejsie.
Ale
Je treba vytvarat velmi vela objektov.
Garbage collector sa narobi.
End of explanation
"""
import pyrsistent as ps
"""
Explanation: Ako zabezpecit nemennost objektov?
konvencia
vynutit si ju
S vela vecami si mozeme pomoct kniznicou Pyrsistent
End of explanation
"""
v1 = ps.pvector([1, 2, 3, 4])
v1 == ps.v(1, 2, 3, 4)
v1[1]
v1[1:3]
v1[1] = 3
v3 = v1.set(1, 5)
print(v3)
print(v1)
"""
Explanation: List / Vektor
End of explanation
"""
m1 = ps.pmap({'a':1, 'b':2})
m1 == ps.m(a=1, b=2)
m1['a']
m1.b # toto s dict nejde
print(m1.set('a', 3))
print(m1)
print(id(m1), id(m1.set('a', 3)))
"""
Explanation: Map / dict
End of explanation
"""
ps.freeze([1, {'a': 3}])
ps.thaw(ps.v(1, ps.m(a=3)))
"""
Explanation: Transformacia mutable <=> immutable
End of explanation
"""
v1 = ps.v(0, 1, 2, 3, 4, 5, 6, 7, 8)
print(v1)
v2 = v1.set(5, 'beef')
print(v2)
"""
Explanation: ... a dalsie immutable struktury
https://github.com/tobgu/pyrsistent
PVector, similar to a python list
PMap, similar to dict
PSet, similar to set
PRecord, a PMap on steroids with fixed fields, optional type and invariant checking and much more
PClass, a Python class fixed fields, optional type and invariant checking and much more
Checked collections, PVector, PMap and PSet with optional type and invariance checks and more
PBag, similar to collections.Counter
PList, a classic singly linked list
PDeque, similar to collections.deque
Immutable object type (immutable) built on the named tuple
freeze and thaw functions to convert between pythons standard collections and pyrsistent collections.
Flexible transformations of arbitrarily complex structures built from PMaps and PVectors.
Da sa nieco spravit s tou spotrebou pamati?
Po niektorych operaciach sa objekty dost podobaju
End of explanation
"""
def process_item(x):
    """Per-item transform used by the list-processing demos: squares x."""
    squared = x * x
    return squared
item_list = [1,2,3,4,5,6]
# impertivny zapis
collection = []
for item in item_list:
partial_result = process_item(item)
collection.append(partial_result)
collection
# C-like zapis
collection = []
index = 0
while index < len(item_list):
partial_result = process_item(item_list[index])
collection.append(partial_result)
index += 1
collection
"""
Explanation: Zdielanie casti datovej struktury
pvector([0, 1, 2, 3, 4, 5, 6, 7, 8])
pvector([0, 1, 2, 3, 4, 'beef', 6, 7, 8])
http://hypirion.com/musings/understanding-persistent-vector-pt-1
Higher order functions
Funkcional v LISPe je funkcia, ktora ma ako argument funkciu alebo funkciu vracia
FUNCALL - vykonanie funkcie s argumentami
MAPCAR - zobrazenie
REMOVE-IF/REMOVE-IF-NOT - filter
REDUCE - redukcia
...
V Pythone a inych jazykoch
Funkcia vyssej urovne (Higher order function) - je funkcia, ktora dostava funkciu ako parameter
Generator - je funkcia, ktora vracia funkciu
Funkcie vyssej urovne sa daju velmi dobre pouzit na spracovanie zoznamu
Najcastejsie operacie so zoznamom:
* zobrazenie
* filter
* redukcia
Zobrazenie
Aplikovanie funkcie na vsetky prvky zoznamu a vytvorenie noveho zoznamu z transformovanych prvkov
End of explanation
"""
def process_item(x):
    """Square the given value; the transform applied by map() in the demo."""
    result = x * x
    return result
item_list = [1,2,3,4,5,6]
# funkcionalny zapis
collection = map(process_item, item_list)
collection
"""
Explanation: Zobrazenie pomocou funkcie vyssej urovne je prehladnejsie
End of explanation
"""
def fahrenheit(T):
    """Convert a Celsius temperature to degrees Fahrenheit."""
    scale = 9.0 / 5.0
    return scale * T + 32

def celsius(T):
    """Convert a Fahrenheit temperature to degrees Celsius."""
    scale = 5.0 / 9.0
    return scale * (T - 32)
temperatures = (36.5, 37, 37.5, 38, 39)
F = list(map(fahrenheit, temperatures))
C = list(map(celsius, F))
print(F)
print(C)
"""
Explanation: Dalsi priklad pouzitia funkcie map
End of explanation
"""
list(map(len, open('data/morho.txt')))
list(map(print, open('data/morho.txt')))
"""
Explanation: Alebo este iny
End of explanation
"""
def my_map(f, seq):
    """Reference implementation of map() that eagerly returns a list.

    This mirrors Python 2's built-in map; in Python 3 the built-in
    returns a lazy iterator instead.
    """
    return [f(item) for item in seq]
"""
Explanation: Funkcia map odstranuje potrebu udrzovat si stav
nepotrebujem ziadnu kolekciu, ktora je v nejakom case ciastocne naplnena
nepotrebujem ziadny index, ktory sa inkrementuje
nestaram sa o to, ako map funguje
iterativne, rekurziou, paralelne, distribuovane, pomocou indexu?
nestaram sa o vnutornu strukturu kolekcie
staci aby sa cez nu dalo iterovat
o tomto si povieme viac nabuduce
Funkcia map by mohla byt implementovana napriklad takto
End of explanation
"""
item_list = [1,2,3,4,5,6]
def condition(x):
    """Predicate for the filter demo: True when x is an even integer."""
    return not x % 2
collection = []
for item in item_list:
if condition(item):
collection.append(item)
collection
"""
Explanation: Filter
Zo zoznamu sa vytvara novy zoznam s tymi prvkami, ktore splnaju podmienku
End of explanation
"""
item_list = [1,2,3,4,5,6]
def condition(x):
    """Keep-predicate passed to filter(): True for even integers."""
    return (x & 1) == 0
collection = filter(condition, item_list)
list(collection)
"""
Explanation: Filter pomocou funkcie vyssej urovne
End of explanation
"""
fibonacci = [0,1,1,2,3,5,8,13,21,34,55]
def is_even(x):
    """Return True for even integers, False for odd ones."""
    remainder = x % 2
    return remainder == 0
list(filter(is_even, fibonacci))
"""
Explanation: Dalsi priklad pouzitia funkcie Filter
End of explanation
"""
item_list = [47,11,42,13]
def add(a, b):
    """Binary addition, used as the accumulator function for reduce()."""
    total = a + b
    return total
from functools import reduce
reduce(add, item_list)
"""
Explanation: Redukcia
reduce(func, seq, init)
func(a, b)
Opakovane aplikuje funkciu na sekvenciu.
func prijma dva argumenty: hodnotu akumulatora a jeden prvok mnoziny
Atributom func moze byt prvok sekvencie alebo navratova hodnota inej func
Typicky priklad je suma prvkov zoznamu
End of explanation
"""
total = 0 # Takto by to bolo imperativne
for item in item_list:
total = add(total, item)
total
"""
Explanation:
End of explanation
"""
from functools import reduce
def mul(a, b):
    """Binary multiplication for use with functools.reduce()."""
    product = a * b
    return product
reduce(mul, [1,2,3,4,5])
"""
Explanation: Dalsi priklad - nasobenie prvkov zoznamu
End of explanation
"""
from operator import add
from operator import mul
"""
Explanation: Vela funkcii uz je predpripravenych
End of explanation
"""
from functools import reduce
from operator import add
print(reduce(add, open('data/morho.txt')))
"""
Explanation: Da sa spracovavat aj nieco ine ako cisla
End of explanation
"""
from operator import or_
reduce(or_, ({1}, {1, 2}, {1, 3})) # union
from operator import and_
reduce(and_, ({1}, {1, 2}, {1, 3}))
"""
Explanation: Da sa napriklad pracovat s mnozinami
End of explanation
"""
my_sum = lambda x, y: x + y
my_sum(1,2)
"""
Explanation: Lambda funkcia
anonymna funkcia
End of explanation
"""
item_list = [1,2,3,4,5]
print(list(map(lambda x: x**2, item_list)))
item_list = ["auto", "macka", "traktor"]
list(map(lambda x: x.upper(), item_list))
"""
Explanation: obemdzenie na jediny riadok
nepotrebuje return
Lambda je celkom prakticka ako parameter funkcie
End of explanation
"""
print(list(map(lambda x: x**2, [1,2,3,4,5])))
print([x**2 for x in [1,2,3,4,5]])
print(list(filter(lambda x: x % 2 == 0, [1,2,3,4,5])))
print([x for x in [1,2,3,4,5] if x % 2 == 0])
"""
Explanation: Spracovanie zoznamu (list comprehension)
End of explanation
"""
|
JaviMerino/lisa | ipynb/android/workloads/Android_YouTube.ipynb | apache-2.0 | import logging
reload(logging)
log_fmt = '%(asctime)-9s %(levelname)-8s: %(message)s'
logging.basicConfig(format=log_fmt)
# Change to info once the notebook runs ok
logging.getLogger().setLevel(logging.INFO)
%pylab inline
import os
import pexpect as pe
from time import sleep
# Support to access the remote target
import devlib
from env import TestEnv
from devlib.utils.android import adb_command
# Support for trace events analysis
from trace import Trace
# Suport for FTrace events parsing and visualization
import trappy
# Set it to your local CATAPULT home folder
CATAPULT_HOME = "/home/pippo/work/catapult"
"""
Explanation: EAS Testing - YouTube on Android
The goal of this experiment is to run Youtube videos on a Nexus N5X running Android with an EAS kernel and collect results. The Analysis phase will consist in comparing EAS with other schedulers, that is comparing sched governor with:
- interactive
- performance
- powersave
- ondemand
End of explanation
"""
# Setup a target configuration
my_target_conf = {
# Target platform and board
"platform" : 'android',
# Add target support
"board" : 'n5x',
# Device ID
#"device" : "00b1346f0878ccb1",
# Define devlib modules to load
"modules" : [
'cpufreq' # enable CPUFreq support
],
}
my_tests_conf = {
# Folder where all the results will be collected
"results_dir" : "Android_Youtube",
# Platform configurations to test
"confs" : [
{
"tag" : "youtube",
"flags" : "ftrace", # Enable FTrace events
"sched_features" : "ENERGY_AWARE", # enable EAS
},
],
# FTrace events to collect for all the tests configuration which have
# the "ftrace" flag enabled
"ftrace" : {
"events" : [
"sched_switch",
"sched_load_avg_cpu",
"cpu_frequency",
"cpu_capacity"
],
"buffsize" : 10 * 1024,
},
# Tools required by the experiments
"tools" : [ 'trace-cmd' ],
}
# Ensure ADB has root priviledges, which are required by systrace
!adb root
# Initialize a test environment using:
# the provided target configuration (my_target_conf)
# the provided test configuration (my_test_conf)
te = TestEnv(target_conf=my_target_conf, test_conf=my_tests_conf)
target = te.target
"""
Explanation: Test Environment set up
In case more than one Android device are conencted to the host, you must specify the ID of the device you want to target in my_target_conf. Run adb devices on your host to get the ID.
End of explanation
"""
def set_performance():
    """Pin every online CPU to the 'performance' cpufreq governor."""
    target.cpufreq.set_all_governors('performance')
def set_powersave():
    """Pin every online CPU to the 'powersave' cpufreq governor."""
    target.cpufreq.set_all_governors('powersave')
def set_interactive():
    """Pin every online CPU to the 'interactive' cpufreq governor."""
    target.cpufreq.set_all_governors('interactive')
def set_sched():
    """Pin every online CPU to the 'sched' (EAS) cpufreq governor."""
    target.cpufreq.set_all_governors('sched')
def set_ondemand():
    """Pin every CPU to 'ondemand' with the fastest sampling rate.

    The sampling rate is forced to each CPU's minimum supported value so the
    governor reacts as quickly as possible during the benchmark.
    """
    target.cpufreq.set_all_governors('ondemand')
    for cpu in target.list_online_cpus():
        tunables = target.cpufreq.get_governor_tunables(cpu)
        target.cpufreq.set_governor_tunables(
            cpu,
            'ondemand',
            **{'sampling_rate' : tunables['sampling_rate_min']}
        )
# CPUFreq configurations to test.
# Each entry maps a governor name to a short label (used to tag results)
# and the setter that applies it on the target; commented-out entries are
# simply skipped by the experiment loop below.
confs = {
    'performance' : {
        'label' : 'prf',
        'set' :  set_performance,
    },
    #'powersave' : {
    #    'label' : 'pws',
    #    'set' :  set_powersave,
    #},
     'interactive' : {
         'label' : 'int',
         'set' :  set_interactive,
     },
    #'sched' : {
    #    'label' : 'sch',
    #    'set' :  set_sched,
    #},
    #'ondemand' : {
    #    'label' : 'odm',
    #    'set' :  set_ondemand,
    #}
}
# Results of each run, keyed by governor name (filled in by experiment()).
results = {}
# adb command template: dump YouTube's gfxinfo frame statistics to a host file.
YOUTUBE_CMD = 'shell dumpsys gfxinfo com.google.android.youtube > {}'
def youtube_run(exp_dir, video_url, video_duration_s):
    """Play a YouTube video on the target device and collect frame statistics.

    :param exp_dir: host directory where framestats.txt is written
    :param video_url: YouTube URL opened on the device via a VIEW intent
    :param video_duration_s: seconds to let the video play before sampling
    :returns: path to the collected framestats file
    """
    # Unlock device screen (assume no password required)
    target.execute('input keyevent 82')
    # Press Back button to be sure we run the video from the start
    target.execute('input keyevent KEYCODE_BACK')
    # Start YouTube video on the target device
    target.execute('am start -a android.intent.action.VIEW "{}"'.format(video_url))
    # Allow the activity to start
    sleep(3)
    # Reset framestats collection
    target.execute('dumpsys gfxinfo --reset')
    # Wait until the end of the video
    sleep(video_duration_s)
    # Get frame stats
    framestats_file = os.path.join(exp_dir, "framestats.txt")
    adb_command(target.adb_name, YOUTUBE_CMD.format(framestats_file))
    # Close application
    target.execute('am force-stop com.google.android.youtube')
    # Clear application data
    target.execute('pm clear com.google.android.youtube')
    return framestats_file
SYSTRACE_CMD = CATAPULT_HOME + "/systrace/systrace/systrace.py -o {} gfx view sched freq idle -t {}"
def experiment(governor, exp_dir, collect='ftrace', trace_time=30):
    """Run the YouTube workload under a given cpufreq governor and trace it.

    :param governor: key into the `confs` dict (e.g. 'performance')
    :param exp_dir: host directory collecting all artifacts of this run
    :param collect: tracing backend -- a string containing 'ftrace' or 'systrace'
    :param trace_time: seconds of video playback / trace duration
    :returns: dict with the experiment dir, framestats/trace paths and the
              parsed FTrace object
    :raises ValueError: if `collect` names no known tracing backend
    """
    # Fail fast on an unsupported backend: the original code would otherwise
    # hit a NameError on `trace_file` at the Trace() call below.
    if 'ftrace' not in collect and 'systrace' not in collect:
        raise ValueError('Unknown collect method: {}'.format(collect))

    # Create the output directory; os.makedirs instead of shelling out to
    # `mkdir -p`, which also handles paths containing spaces.
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)

    logging.info('------------------------')
    logging.info('Run workload using %s governor', governor)
    # Apply the requested governor on the target
    confs[governor]['set']()

    # Start the required tracing command
    if 'ftrace' in collect:
        # Start FTrace and Energy monitoring
        te.ftrace.start()
    elif 'systrace' in collect:
        # Start systrace in the background; it captures for `trace_time` s
        trace_file = os.path.join(exp_dir, 'trace.html')
        trace_cmd = SYSTRACE_CMD.format(trace_file, trace_time)
        logging.info('SysTrace: %s', trace_cmd)
        systrace_output = pe.spawn(trace_cmd)

    ### Run the benchmark ###
    framestats_file = youtube_run(exp_dir, "https://youtu.be/XSGBVzeBUbk?t=45s", trace_time)

    # Stop the required trace command
    if 'ftrace' in collect:
        te.ftrace.stop()
        # Collect and keep track of the trace
        trace_file = os.path.join(exp_dir, 'trace.dat')
        te.ftrace.get_trace(trace_file)
    elif 'systrace' in collect:
        logging.info('Waiting systrace report [%s]...', trace_file)
        systrace_output.wait()

    # Parse trace
    # NOTE(review): for the systrace backend this hands an HTML report to
    # Trace(); confirm Trace() accepts systrace output and not only trace.dat.
    tr = Trace(te.platform, trace_file,
               events=my_tests_conf['ftrace']['events'])

    # return all the experiment data
    return {
        'dir'             : exp_dir,
        'framestats_file' : framestats_file,
        'trace'           : trace_file,
        'ftrace'          : tr.ftrace
    }
"""
Explanation: Support Functions
This set of support functions will help us running the benchmark using different CPUFreq governors.
End of explanation
"""
# Run the benchmark once per configured governor, collecting a systrace
# report over 15 s of video playback; results are keyed by governor name.
for governor in confs:
    test_dir = os.path.join(te.res_dir, governor)
    results[governor] = experiment(governor, test_dir,
                                   collect='systrace', trace_time=15)
"""
Explanation: Run experiments and collect traces
End of explanation
"""
for governor in confs:
framestats_file = results[governor]['framestats_file']
print "Frame Statistics for {} governor".format(governor.upper())
!sed '/Stats since/,/99th/!d;/99th/q' $framestats_file
print ""
trace_file = results['interactive']['trace']
!xdg-open {trace_file}
"""
Explanation: UI Performance Analysis
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.