repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
arcyfelix/Courses | 17-09-27-AWS Machine Learning A Complete Guide With Python/07 - Binary Classification/01 - ml_logistic_cost_example.ipynb | apache-2.0 | # Sigmoid or logistic function
# For any x, output is bounded to 0 & 1.
def sigmoid_func(x):
    """Logistic function: map any real x into the open interval (0, 1).

    Uses the numerically stable two-branch form; the naive
    1/(1 + exp(-x)) raises OverflowError for x below about -709
    because math.exp(-x) exceeds the float range.
    """
    if x >= 0:
        return 1.0/(1 + math.exp(-x))
    # For negative x, compute via exp(x) which underflows gracefully to 0.
    z = math.exp(x)
    return z / (1 + z)
# Sanity checks: large positive input -> ~1, large negative -> ~0, zero -> 0.5.
sigmoid_func(10)
sigmoid_func(-100)
sigmoid_func(0)
# Sigmoid function example
# Evaluate the sigmoid on an evenly spaced grid of inputs.
x = pd.Series(np.arange(-8, 8, 0.5))
y = x.map(sigmoid_func)
x.head()
# Plot the S-curve with reference lines at y=0, y=0.5 and y=1.
fig = plt.figure(figsize = (12, 8))
plt.plot(x,y)
plt.ylim((-0.2, 1.2))
plt.xlabel('input')
plt.ylabel('sigmoid output')
plt.grid(True)
plt.axvline(x = 0, ymin = 0, ymax = 1, ls = 'dashed')
plt.axhline(y = 0.5, xmin = 0, xmax = 10, ls = 'dashed')
plt.axhline(y = 1.0, xmin = 0, xmax = 10, color = 'r')
plt.axhline(y = 0.0, xmin = 0, xmax = 10, color = 'r')
plt.title('Sigmoid')
"""
Explanation: <h4>Classification Overview</h4>
<ul>
<li>Predict a binary class as output based on given features.
</li>
<li>Examples: Do we need to follow up on a customer review? Is this transaction fraudulent or valid one? Are there signs of onset of a medical condition or disease? Is this considered junk food or not?</li>
<li>Linear Model. Estimated Target = w<sub>0</sub> + w<sub>1</sub>x<sub>1</sub>
+ w<sub>2</sub>x<sub>2</sub> + w<sub>3</sub>x<sub>3</sub>
+ … + w<sub>n</sub>x<sub>n</sub><br>
where, w is the weight and x is the feature
</li>
<li><b>Logistic Regression</b>. Estimated Probability = <b>sigmoid</b>(w<sub>0</sub> + w<sub>1</sub>x<sub>1</sub>
+ w<sub>2</sub>x<sub>2</sub> + w<sub>3</sub>x<sub>3</sub>
+ … + w<sub>n</sub>x<sub>n</sub>)<br>
where, w is the weight and x is the feature
</li>
<li>Linear model output is fed thru a sigmoid or logistic function to produce the probability.</li>
<li>Predicted Value: Probability of a binary outcome. Closer to 1 is positive class, closer to 0 is negative class</li>
<li>Algorithm Used: Logistic Regression. Objective is to find the weights w that maximizes separation between the two classes</li>
<li>Optimization: Stochastic Gradient Descent. Seeks to minimize loss/cost so that predicted value is as close to actual as possible</li>
<li>Cost/Loss Calculation: Logistic loss function</li>
</ul>
End of explanation
"""
# NOTE(review): Windows-style relative path (raw string); assumes the
# notebook runs from a directory with a sibling ..\Data tree -- confirm.
data_path = r'..\Data\ClassExamples\HoursExam\HoursExamResult.csv'
df = pd.read_csv(data_path)
"""
Explanation: Example Dataset - Hours spent and Exam Results:
https://en.wikipedia.org/wiki/Logistic_regression
Sigmoid function produces an output between 0 and 1. An input of 0 produces an output of 0.5 probability. Negative input produces a value less than 0.5, while positive input produces a value greater than 0.5.
End of explanation
"""
# Peek at the dataset (columns used below: Hours, Pass).
df.head()
# optimal weights given in the wiki dataset
def straight_line(x):
    """Linear model using the optimal weights given in the wiki dataset."""
    slope, intercept = 1.5046, -4.0777
    return slope * x + intercept
# How does weight affect outcome
def straight_line_weight(weight1, x):
    """Linear model with an adjustable slope; intercept fixed at -4.0777."""
    intercept = -4.0777
    return x * weight1 + intercept
# Generate probability by running feature thru the linear model and then thru sigmoid function
y_vals = df.Hours.map(straight_line).map(sigmoid_func)
# Scatter the fitted probabilities against the observed pass/fail points,
# with dashed lines marking candidate decision cutoffs.
fig = plt.figure(figsize = (12, 8))
plt.scatter(x = df.Hours,
            y = y_vals,
            color = 'b',
            label = 'logistic')
plt.scatter(x = df[df.Pass == 1].Hours,
            y = df[df.Pass == 1].Pass,
            color = 'g',
            label = 'pass')
plt.scatter(x = df[df.Pass == 0].Hours,
            y = df[df.Pass == 0].Pass,
            color = 'r',
            label = 'fail')
plt.title('Hours Spent Reading - Pass Probability')
plt.xlabel('Hours')
plt.ylabel('Pass Probability')
plt.grid(True)
plt.xlim((0,7))
plt.ylim((-0.2,1.5))
# Vertical/horizontal pairs mark three cutoff choices (0.5, 0.3, 0.6).
plt.axvline(x = 2.75,
            ymin = 0,
            ymax=1)
plt.axhline(y = 0.5,
            xmin = 0,
            xmax = 6,
            label = 'cutoff at 0.5',
            ls = 'dashed')
plt.axvline(x = 2,
            ymin = 0,
            ymax = 1)
plt.axhline(y = 0.3,
            xmin = 0,
            xmax = 6,
            label = 'cutoff at 0.3',
            ls = 'dashed')
plt.axvline(x = 3,
            ymin = 0,
            ymax=1)
plt.axhline(y = 0.6,
            xmin = 0,
            xmax = 6,
            label='cutoff at 0.6',
            ls = 'dashed')
plt.legend()
"""
Explanation: Input Feature: Hours<br>
Output: Pass (1 = pass, 0 = fail)
End of explanation
"""
# Compute predicted probabilities for several candidate slopes (weights).
weights = [0, 1, 2]
y_at_weight = {}
for w in weights:
    y_calculated = []
    y_at_weight[w] = y_calculated
    for x in df.Hours:
        y_calculated.append(sigmoid_func(straight_line_weight(w, x)))
# NOTE(review): y_vals was already passed through sigmoid_func above, so this
# applies the sigmoid a second time; y_sig_vals also appears unused -- verify.
y_sig_vals = y_vals.map(sigmoid_func)
# Compare the fitted logistic curve against curves at weights 0, 1 and 2.
fig = plt.figure(figsize = (12, 8))
plt.scatter(x = df.Hours,
            y = y_vals,
            color = 'b',
            label = 'logistic curve')
plt.scatter(x = df[df.Pass==1].Hours,
            y = df[df.Pass==1].Pass,
            color = 'g',
            label = 'pass')
plt.scatter(x = df[df.Pass==0].Hours,
            y = df[df.Pass==0].Pass,
            color = 'r',
            label = 'fail')
plt.scatter(x = df.Hours,
            y = y_at_weight[0],
            color = 'k',
            label = 'at wt 0')
plt.scatter(x = df.Hours,
            y = y_at_weight[1],
            color = 'm',
            label = 'at wt 1')
plt.scatter(x = df.Hours,
            y = y_at_weight[2],
            color = 'y',
            label = 'at wt 2')
plt.xlim((0,8))
plt.ylim((-0.2, 1.5))
# Reference lines: 0.5 probability cutoff and an example decision boundary.
plt.axhline(y = 0.5,
            xmin = 0,
            xmax = 6,
            color = 'b',
            ls = 'dashed')
plt.axvline(x = 4,
            ymin = 0,
            ymax = 1,
            color = 'm',
            ls = 'dashed')
plt.xlabel('Hours')
plt.ylabel('Pass Probability')
plt.grid(True)
plt.title('How weights impact classification - cutoff 0.5')
plt.legend()
"""
Explanation: At 2.7 hours of study time, we hit 0.5 probability. So, any student who spent 2.7 hours or more would have a higher probability of passing the exam.
In the above example,<br>
1. Top right quadrant = true positive. pass got classified correctly as pass
2. Bottom left quadrant = true negative. fail got classified correctly as fail
3. Top left quadrant = false negative. pass got classified as fail
4. Bottom right quadrant = false positive. fail got classified as pass
Cutoff can be adjusted; instead of 0.5, cutoff could be established at 0.4 or 0.6 depending on the nature of problem and impact of misclassification
End of explanation
"""
# Cost Function
# z spans (0, 1) exclusive of the endpoints so log() stays finite.
z = pd.Series(np.linspace(0.000001, 0.999999, 100))
# Loss when the true class is positive: -log(p).
ypositive = -z.map(math.log)
# Loss when the true class is negative: -log(1 - p).
ynegative = -z.map(lambda x: math.log(1-x))
fig = plt.figure(figsize = (12, 8))
plt.plot(z,
         ypositive,
         label = 'Loss curve for positive example')
plt.plot(z,
         ynegative,
         label = 'Loss curve for negative example')
plt.ylabel('Loss')
plt.xlabel('Class')
plt.title('Loss Curve')
plt.legend()
"""
Explanation: Logistic Regression Cost/Loss Function<br>
End of explanation
"""
def compute_logisitic_cost(y_actual, y_predicted):
    """Total logistic (log) loss of predicted probabilities vs. binary actuals.

    Positive examples (y_actual == 1) each contribute -log(p);
    negative examples (y_actual == 0) each contribute -log(1 - p).
    """
    pos_probs = y_predicted[y_actual == 1]
    neg_probs = y_predicted[y_actual == 0]
    pos_loss = pos_probs.map(lambda p: -math.log(p)).sum()
    neg_loss = neg_probs.map(lambda p: -math.log(1.0 - p)).sum()
    return pos_loss + neg_loss
# Example of how prediction vs actual impact loss
# Prediction is exact opposite of actual. Loss/Cost should be very high
actual = pd.Series([1, 0, 1])
predicted = pd.Series([0.001, .9999, 0.0001])
print('Loss: {0:0.3f}'.format(compute_logisitic_cost(actual, predicted)))
# Prediction is close to actual. Loss/Cost should be very low
y_actual = pd.Series([1, 0, 1])
y_predicted = pd.Series([0.9, 0.1, 0.8])
print('Loss: {0:0.3f}'.format(compute_logisitic_cost(y_actual, y_predicted)))
# Prediction is midpoint. Loss/Cost should be high
y_actual = pd.Series([1, 0, 1])
y_predicted = pd.Series([0.5, 0.5, 0.5])
print('Loss: {0:0.3f}'.format(compute_logisitic_cost(y_actual, y_predicted)))
# Prediction is moderately close to actual; loss falls between the two cases above.
y_actual = pd.Series([1, 0, 1])
y_predicted = pd.Series([0.8, 0.4, 0.7])
print('Loss: {0:0.3f}'.format(compute_logisitic_cost(y_actual, y_predicted)))
# Sweep the slope weight and record total loss at each value.
weight = pd.Series(np.linspace(-1.5, 5, num = 100))
cost = []  # NOTE(review): appears unused; cost_at_wt holds the results.
cost_at_wt = []
for w1 in weight:
    y_calculated = []
    for x in df.Hours:
        y_calculated.append (sigmoid_func(straight_line_weight(w1, x)))
    cost_at_wt.append(compute_logisitic_cost(df.Pass, pd.Series(y_calculated)))
# Plot the cost curve; the minimum is near the optimal weight ~1.5.
fig = plt.figure(figsize = (12, 8))
plt.scatter(x = weight, y = cost_at_wt)
plt.xlabel('Weight')
plt.ylabel('Cost')
plt.grid(True)
plt.axvline(x = 1.5,
            ymin = 0,
            ymax = 100,
            label = 'Minimal loss')
plt.axhline(y = 6.5,
            xmin = 0,
            xmax = 6)
plt.title('Finding optimal weights')
plt.legend()
"""
Explanation: Cost function is a log curve<br>
1. positive example correctly classified as positive is given a lower loss/cost
2. positive example incorrectly classified as negative is given a higher loss/cost
3. Negative example correctly classified as negative is given a lower loss/cost
4. Negative example incorrectly classified as positive is given a higher loss/cost
End of explanation
"""
|
besser82/shogun | doc/ipython-notebooks/classification/MKL.ipynb | bsd-3-clause | %pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# import all shogun classes
import shogun as sg
from shogun import *
"""
Explanation: Multiple Kernel Learning
By Saurabh Mahindre - <a href="https://github.com/Saurabh7">github.com/Saurabh7</a>
This notebook is about multiple kernel learning in shogun. We will see how to construct a combined kernel, determine optimal kernel weights using MKL and use it for different types of classification and novelty detection.
Introduction
Mathematical formulation
Using a Combined kernel
Example: Toy Data
Generating Kernel weights
Binary classification using MKL
MKL for knowledge discovery
Multiclass classification using MKL
One-class classification using MKL
End of explanation
"""
# Combined kernel: holds a weighted linear combination of subkernels.
kernel = CombinedKernel()
"""
Explanation: Introduction
<em>Multiple kernel learning</em> (MKL) is about using a combined kernel i.e. a kernel consisting of a linear combination of arbitrary kernels over different domains. The coefficients or weights of the linear combination can be learned as well.
Kernel based methods such as support vector machines (SVMs) employ a so-called kernel function $k(x_{i},x_{j})$ which intuitively computes the similarity between two examples $x_{i}$ and $x_{j}$. </br>
Selecting the kernel function
$k()$ and its parameters is an important issue in training. Kernels designed by humans usually capture one aspect of data, so choosing one kernel means selecting exactly one such aspect. Combining several such aspects is therefore often better than selecting just one.
In shogun the MKL is the base class for MKL. We can do classifications: binary, one-class, multiclass and regression too: regression.
Mathematical formulation (skip if you just want code examples)
</br>In a SVM, defined as:
$$f({\bf x})=\text{sign} \left(\sum_{i=0}^{N-1} \alpha_i k({\bf x}, {\bf x_i})+b\right)$$</br>
where ${\bf x_i},{i = 1,...,N}$ are labeled training examples ($y_i \in {±1}$).
One could make a combination of kernels like:
$${\bf k}(x_i,x_j)=\sum_{k=0}^{K} \beta_k {\bf k_k}(x_i, x_j)$$
where $\beta_k > 0$ and $\sum_{k=0}^{K} \beta_k = 1$
In the multiple kernel learning problem for binary classification one is given $N$ data points ($x_i, y_i$ )
($y_i \in {±1}$), where $x_i$ is translated via $K$ mappings $\phi_k(x) \rightarrow R^{D_k} $, $k=1,...,K$ , from the input into $K$ feature spaces $(\phi_1(x_i),...,\phi_K(x_i))$ where $D_k$ denotes dimensionality of the $k$-th feature space.
In MKL $\alpha_i$,$\beta$ and bias are determined by solving the following optimization program. For details see [1].
$$\mbox{min} \hspace{4mm} \gamma-\sum_{i=1}^N\alpha_i$$
$$ \mbox{w.r.t.} \hspace{4mm} \gamma\in R, \alpha\in R^N \nonumber$$
$$\mbox {s.t.} \hspace{4mm} {\bf 0}\leq\alpha\leq{\bf 1}C,\;\;\sum_{i=1}^N \alpha_i y_i=0 \nonumber$$
$$ {\frac{1}{2}\sum_{i,j=1}^N \alpha_i \alpha_j y_i y_j \leq \gamma}, \forall k=1,\ldots,K\nonumber\
$$
Here C is a pre-specified regularization parameter.
Within shogun this optimization problem is solved using semi-infinite programming. For 1-norm MKL one of the two approaches described in [1] is used.
The first approach (also called the wrapper algorithm) wraps around a single kernel SVMs, alternatingly solving for $\alpha$ and $\beta$. It is using a traditional SVM to generate new violated constraints and thus requires a single kernel SVM and any of the SVMs contained in shogun can be used. In the MKL step either a linear program is solved via glpk or cplex or analytically or a newton (for norms>1) step is performed.
The second much faster but also more memory demanding approach performing interleaved optimization, is integrated into the chunking-based SVMlight.
Using a Combined kernel
Shogun provides an easy way to make combination of kernels using the CombinedKernel class, to which we can append any kernel from the many options shogun provides. It is especially useful to combine kernels working on different domains and to combine kernels looking at independent features and requires CombinedFeatures to be used. Similarly the CombinedFeatures is used to combine a number of feature objects into a single CombinedFeatures object
End of explanation
"""
# Build a 4-component 2D Gaussian mixture and sample train/test data
# from each component in turn by switching the mixture coefficients.
num=30;
num_components=4
means=zeros((num_components, 2))
means[0]=[-1,1]
means[1]=[2,-1.5]
means[2]=[-1,-3]
means[3]=[2,1]
covs=array([[1.0,0.0],[0.0,1.0]])
gmm=GMM(num_components)
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
[gmm.set_nth_cov(covs,i) for i in range(num_components)]
# Components 0 and 1 supply the negative class samples...
gmm.set_coef(array([1.0,0.0,0.0,0.0]))
xntr=array([gmm.sample() for i in range(num)]).T
xnte=array([gmm.sample() for i in range(5000)]).T
gmm.set_coef(array([0.0,1.0,0.0,0.0]))
xntr1=array([gmm.sample() for i in range(num)]).T
xnte1=array([gmm.sample() for i in range(5000)]).T
# ...and components 2 and 3 supply the positive class samples.
gmm.set_coef(array([0.0,0.0,1.0,0.0]))
xptr=array([gmm.sample() for i in range(num)]).T
xpte=array([gmm.sample() for i in range(5000)]).T
gmm.set_coef(array([0.0,0.0,0.0,1.0]))
xptr1=array([gmm.sample() for i in range(num)]).T
xpte1=array([gmm.sample() for i in range(5000)]).T
# Stack samples column-wise; labels are -1 for negatives, +1 for positives.
traindata=concatenate((xntr,xntr1,xptr,xptr1), axis=1)
trainlab=concatenate((-ones(2*num), ones(2*num)))
testdata=concatenate((xnte,xnte1,xpte,xpte1), axis=1)
testlab=concatenate((-ones(10000), ones(10000)))
#convert to shogun features and generate labels for data
feats_train=features(traindata)
labels=BinaryLabels(trainlab)
_=jet()
figure(figsize(18,5))
subplot(121)
# plot train data
_=scatter(traindata[0,:], traindata[1,:], c=trainlab, s=100)
title('Toy data for classification')
axis('equal')
colors=["blue","blue","red","red"]
# a tool for visualisation
from matplotlib.patches import Ellipse
def get_gaussian_ellipse_artist(mean, cov, nstd=1.96, color="red", linewidth=3):
    """Build an unfilled matplotlib Ellipse covering nstd standard
    deviations of a 2D Gaussian with the given mean and covariance."""
    eigvals, eigvecs = eigh(cov)
    # Sort eigenpairs by descending eigenvalue so the major axis comes first.
    idx = eigvals.argsort()[::-1]
    eigvals = eigvals[idx]
    eigvecs = eigvecs[:, idx]
    # Orientation of the principal axis, in degrees.
    angle = numpy.degrees(arctan2(*eigvecs[:, 0][::-1]))
    w, h = 2 * nstd * sqrt(eigvals)
    return Ellipse(xy=mean, width=w, height=h, angle=angle,
                   edgecolor=color, fill=False, linewidth=linewidth)
# Overlay a confidence ellipse for each mixture component on the scatter plot.
for i in range(num_components):
    gca().add_artist(get_gaussian_ellipse_artist(means[i], covs, color=colors[i]))
"""
Explanation: Prediction on toy data
In order to see the prediction capabilities, let us generate some data using the GMM class. The data is sampled by setting means (GMM notebook) such that it sufficiently covers X-Y grid and is not too easy to classify.
End of explanation
"""
# Two Gaussian subkernels with very different widths (narrow vs. wide).
width0=0.5
kernel0=sg.kernel("GaussianKernel", log_width=np.log(width0))
width1=25
kernel1=sg.kernel("GaussianKernel", log_width=np.log(width1))
#combine kernels
kernel.append_kernel(kernel0)
kernel.append_kernel(kernel1)
kernel.init(feats_train, feats_train)
mkl = MKLClassification()
#set the norm, weights sum to 1.
mkl.set_mkl_norm(1)
mkl.set_C(1, 1)
mkl.set_kernel(kernel)
mkl.set_labels(labels)
#train to get weights
mkl.train()
# The learned subkernel weights (the beta coefficients).
w=kernel.get_subkernel_weights()
print(w)
"""
Explanation: Generating Kernel weights
Just to help us visualize let's use two gaussian kernels (GaussianKernel) with considerably different widths. As required in MKL, we need to append them to the Combined kernel. To generate the optimal weights (i.e $\beta$s in the above equation), training of MKL is required. This generates the weights as seen in this example.
End of explanation
"""
# Evaluate the trained MKL classifier on a dense X-Y grid for visualization.
size=100
x1=linspace(-5, 5, size)
x2=linspace(-5, 5, size)
x, y=meshgrid(x1, x2)
#Generate X-Y grid test data
grid=features(array((ravel(x), ravel(y))))
# Fresh kernels with the same widths, initialized train-vs-grid.
kernel0t=sg.kernel("GaussianKernel", log_width=np.log(width0))
kernel1t=sg.kernel("GaussianKernel", log_width=np.log(width1))
kernelt=CombinedKernel()
kernelt.append_kernel(kernel0t)
kernelt.append_kernel(kernel1t)
#initialize with test grid
kernelt.init(feats_train, grid)
mkl.set_kernel(kernelt)
#prediction
grid_out=mkl.apply()
# Reshape the raw decision values back into the grid for contour plotting.
z=grid_out.get_values().reshape((size, size))
figure(figsize=(10,5))
title("Classification using MKL")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
"""
Explanation: Binary classification using MKL
Now with the data ready and training done, we can do the binary classification. The weights generated can be intuitively understood. We will see that on plotting individual subkernels outputs and outputs of the MKL classification. To apply on test features, we need to reinitialize the kernel with kernel.init and pass the test features. After that it's just a matter of doing mkl.apply to generate outputs.
End of explanation
"""
# Compare the combined-kernel decision regions against each subkernel alone.
z=grid_out.get_labels().reshape((size, size))
# MKL
figure(figsize=(20,5))
subplot(131, title="Multiple Kernels combined")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
# Retrain with only the narrow-width kernel and predict on the grid.
comb_ker0=CombinedKernel()
comb_ker0.append_kernel(kernel0)
comb_ker0.init(feats_train, feats_train)
mkl.set_kernel(comb_ker0)
mkl.train()
comb_ker0t=CombinedKernel()
comb_ker0t.append_kernel(kernel0)
comb_ker0t.init(feats_train, grid)
mkl.set_kernel(comb_ker0t)
out0=mkl.apply()
# subkernel 1
z=out0.get_labels().reshape((size, size))
subplot(132, title="Kernel 1")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
# Retrain with only the wide-width kernel and predict on the grid.
comb_ker1=CombinedKernel()
comb_ker1.append_kernel(kernel1)
comb_ker1.init(feats_train, feats_train)
mkl.set_kernel(comb_ker1)
mkl.train()
comb_ker1t=CombinedKernel()
comb_ker1t.append_kernel(kernel1)
comb_ker1t.init(feats_train, grid)
mkl.set_kernel(comb_ker1t)
out1=mkl.apply()
# subkernel 2
z=out1.get_labels().reshape((size, size))
subplot(133, title="kernel 2")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
"""
Explanation: To justify the weights, let's train and compare two subkernels with the MKL classification output. Training MKL classifier with a single kernel appended to a combined kernel makes no sense and is just like normal single kernel based classification, but let's do it for comparison.
End of explanation
"""
# Measure held-out test error for the combined kernel and each subkernel.
kernelt.init(feats_train, features(testdata))
mkl.set_kernel(kernelt)
out=mkl.apply()
evaluator=ErrorRateMeasure()
print("Test error is %2.2f%% :MKL" % (100*evaluator.evaluate(out,BinaryLabels(testlab))))
comb_ker0t.init(feats_train,features(testdata))
mkl.set_kernel(comb_ker0t)
out=mkl.apply()
evaluator=ErrorRateMeasure()
print("Test error is %2.2f%% :Subkernel1"% (100*evaluator.evaluate(out,BinaryLabels(testlab))))
comb_ker1t.init(feats_train, features(testdata))
mkl.set_kernel(comb_ker1t)
out=mkl.apply()
evaluator=ErrorRateMeasure()
print("Test error is %2.2f%% :subkernel2" % (100*evaluator.evaluate(out,BinaryLabels(testlab))))
"""
Explanation: As we can see the multiple kernel output seems just about right. Kernel 1 gives a sort of overfitting output while the kernel 2 seems not so accurate. The kernel weights are hence so adjusted to get a refined output. We can have a look at the errors by these subkernels to have more food for thought. Most of the time, the MKL error is lesser as it incorporates aspects of both kernels. One of them is strict while other is lenient, MKL finds a balance between those.
End of explanation
"""
def circle(x, radius, neg):
    """Return [x, y] on a circle of the given radius centered at the
    origin; the lower half (negative y) when neg is True."""
    y = sqrt(square(radius) - square(x))
    return [x, -y] if neg else [x, y]
def get_circle(radius):
    """Sample 200 points covering a full circle of the given radius,
    returned as a 2 x 200 array (lower half first, then upper half)."""
    xs = linspace(-radius, radius, 100)
    upper_half = array([circle(x, radius, False) for x in xs]).T
    lower_half = array([circle(x, radius, True) for x in xs]).T
    return concatenate((lower_half, upper_half), axis=1)
def get_data(r1, r2):
    """Two concentric circles (radii r1 and r2) returned both as raw
    coordinates and as a shogun features object."""
    pts = concatenate((get_circle(r1), get_circle(r2)), axis=1)
    return pts, features(pts)
# Inner circle points are class -1, outer circle points are class +1.
l=concatenate((-ones(200),ones(200)))
lab=BinaryLabels(l)
#get two circles with radius 2 and 4
c, feats_tr=get_data(2,4)
c1, feats_tr1=get_data(2,3)
# Plot the well-separated pair next to the closer pair.
_=gray()
figure(figsize=(10,5))
subplot(121)
title("Circles with different separation")
p=scatter(c[0,:], c[1,:], c=lab)
subplot(122)
q=scatter(c1[0,:], c1[1,:], c=lab)
"""
Explanation: MKL for knowledge discovery
MKL can recover information about the problem at hand. Let us see this with a binary classification problem. The task is to separate two concentric classes shaped like circles. By varying the distance between the boundary of the circles we can control the separability of the problem. Starting with an almost non-separable scenario, the data quickly becomes separable as the distance between the circles increases.
End of explanation
"""
def train_mkl(circles, feats_tr):
    """Train a 1-norm MKL classifier on feats_tr using four Gaussian
    subkernels (widths 1, 5, 7, 10); return (subkernel weights, trained mkl).

    NOTE(review): reads the module-level labels `lab`, and the `circles`
    argument is not used inside the function -- confirm intent.
    """
    #Four kernels with different widths
    kernel0=sg.kernel("GaussianKernel", log_width=np.log(1))
    kernel1=sg.kernel("GaussianKernel", log_width=np.log(5))
    kernel2=sg.kernel("GaussianKernel", log_width=np.log(7))
    kernel3=sg.kernel("GaussianKernel", log_width=np.log(10))
    kernel = CombinedKernel()
    kernel.append_kernel(kernel0)
    kernel.append_kernel(kernel1)
    kernel.append_kernel(kernel2)
    kernel.append_kernel(kernel3)
    kernel.init(feats_tr, feats_tr)
    mkl = MKLClassification()
    # 1-norm MKL: learned subkernel weights sum to 1.
    mkl.set_mkl_norm(1)
    mkl.set_C(1, 1)
    mkl.set_kernel(kernel)
    mkl.set_labels(lab)
    mkl.train()
    w=kernel.get_subkernel_weights()
    return w, mkl
def test_mkl(mkl, grid):
    """Apply a trained MKL classifier to `grid` using a fresh combined
    kernel with the same four Gaussian widths used in training.

    NOTE(review): initializes with the module-level `feats_tr`, not the
    features the classifier was trained on -- verify this matches the caller.
    """
    kernel0t=sg.kernel("GaussianKernel", log_width=np.log(1))
    kernel1t=sg.kernel("GaussianKernel", log_width=np.log(5))
    kernel2t=sg.kernel("GaussianKernel", log_width=np.log(7))
    kernel3t=sg.kernel("GaussianKernel", log_width=np.log(10))
    kernelt = CombinedKernel()
    kernelt.append_kernel(kernel0t)
    kernelt.append_kernel(kernel1t)
    kernelt.append_kernel(kernel2t)
    kernelt.append_kernel(kernel3t)
    kernelt.init(feats_tr, grid)
    mkl.set_kernel(kernelt)
    out=mkl.apply()
    return out
# Train on the constant-separation circles and visualize decision values
# on a 50x50 grid.
size=50
x1=linspace(-10, 10, size)
x2=linspace(-10, 10, size)
x, y=meshgrid(x1, x2)
grid=features(array((ravel(x), ravel(y))))
w, mkl=train_mkl(c, feats_tr)
print(w)
out=test_mkl(mkl,grid)
z=out.get_values().reshape((size, size))
figure(figsize=(5,5))
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
title('classification with constant separation')
_=colorbar(c)
"""
Explanation: These are the type of circles we want to distinguish between. We can try classification with a constant separation between the circles first.
End of explanation
"""
# Vary the outer circle radius (so the gap between circles spans 1.5-3.5)
# and record the learned kernel weights at each separation.
range1=linspace(5.5,7.5,50)
x=linspace(1.5,3.5,50)
temp=[]
for i in range1:
    #vary separation between circles
    c, feats=get_data(4,i)
    w, mkl=train_mkl(c, feats)
    temp.append(w)
# One curve per subkernel width: weights as a function of separation.
y=array([temp[i] for i in range(0,50)]).T
figure(figsize=(20,5))
_=plot(x, y[0,:], color='k', linewidth=2)
_=plot(x, y[1,:], color='r', linewidth=2)
_=plot(x, y[2,:], color='g', linewidth=2)
_=plot(x, y[3,:], color='y', linewidth=2)
title("Comparison between kernel widths and weights")
ylabel("Weight")
xlabel("Distance between circles")
_=legend(["1","5","7","10"])
"""
Explanation: As we can see the MKL classifier classifies them as expected. Now let's vary the separation and see how it affects the weights.The choice of the kernel width of the Gaussian kernel used for classification is expected to depend on the separation distance of the learning problem. An increased distance between the circles will correspond to a larger optimal kernel width. This effect should be visible in the results of the MKL, where we used MKL-SVMs with four kernels with different widths (1,5,7,10).
End of explanation
"""
from scipy.io import loadmat, savemat
from os import path, sep
# Load the USPS digit dataset (columns are 256-pixel images).
mat = loadmat(sep.join(['..','..','..','data','multiclass', 'usps.mat']))
Xall = mat['data']
Yall = array(mat['label'].squeeze(), dtype=double)
# map from 1..10 to 0..9, since shogun
# requires multiclass labels to be
# 0, 1, ..., K-1
Yall = Yall - 1
# Fixed seed for a reproducible shuffle before the train subset is taken.
random.seed(0)
subset = random.permutation(len(Yall))
#get first 1000 examples
Xtrain = Xall[:, subset[:1000]]
Ytrain = Yall[subset[:1000]]
Nsplit = 2
all_ks = range(1, 21)
print(Xall.shape)
print(Xtrain.shape)
"""
Explanation: In the above plot we see the kernel weightings obtained for the four kernels. Every line shows one weighting. The courses of the kernel weightings reflect the development of the learning problem: as long as the problem is difficult the best separation can be obtained when using the kernel with smallest width. The low width kernel looses importance when the distance between the circle increases and larger kernel widths obtain a larger weight in MKL. Increasing the distance between the circles, kernels with greater widths are used.
Multiclass classification using MKL
MKL can be used for multiclass classification using the MKLMulticlass class. It is based on the GMNPSVM Multiclass SVM. Its termination criterion is set by set_mkl_epsilon(float64_t eps ) and the maximal number of MKL iterations is set by set_max_num_mkliters(int32_t maxnum). The epsilon termination criterion is the L2 norm between the current MKL weights and their counterpart from the previous iteration. We set it to 0.001 as we want pretty accurate weights.
To see this in action let us compare it to the normal GMNPSVM example as in the KNN notebook, just to see how MKL fares in object recognition. We use the USPS digit recognition dataset.
End of explanation
"""
def plot_example(dat, lab):
    """Display the first five 16x16 digit images in `dat`, each titled
    with its integer label from `lab`."""
    for idx in range(5):
        axes = subplot(1, 5, idx + 1)
        title(int(lab[idx]))
        axes.imshow(dat[:, idx].reshape((16, 16)), interpolation='nearest')
        axes.set_xticks([])
        axes.set_yticks([])
# Render a few training examples in grayscale.
_=figure(figsize=(17,6))
gray()
plot_example(Xtrain, Ytrain)
"""
Explanation: Let's plot five of the examples to get a feel of the dataset.
End of explanation
"""
# MKL training and output
labels = MulticlassLabels(Ytrain)
feats = features(Xtrain)
#get test data from 5500 onwards
Xrem=Xall[:,subset[5500:]]
Yrem=Yall[subset[5500:]]
#test features not used in training
feats_rem=features(Xrem)
labels_rem=MulticlassLabels(Yrem)
# Combined kernel over combined features: one feature object per subkernel.
kernel = CombinedKernel()
feats_train = CombinedFeatures()
feats_test = CombinedFeatures()
#append gaussian kernel
subkernel = sg.kernel("GaussianKernel", log_width=np.log(15))
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_rem)
kernel.append_kernel(subkernel)
#append PolyKernel
feats = features(Xtrain)
subkernel = sg.kernel('PolyKernel', degree=10, c=2)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_rem)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
# MKL multiclass classifier; the mkl_epsilon termination threshold is the
# L2 norm between successive weight vectors.
mkl = MKLMulticlass(1.2, kernel, labels)
mkl.set_epsilon(1e-2)
mkl.set_mkl_epsilon(0.001)
mkl.set_mkl_norm(1)
mkl.train()
#initialize with test features
kernel.init(feats_train, feats_test)
out = mkl.apply()
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
# Show the examples the classifier got wrong.
idx=where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=figure(figsize=(17,6))
gray()
plot_example(Xbad, Ybad)
# Learned subkernel weights for the Gaussian vs. polynomial kernels.
w=kernel.get_subkernel_weights()
print(w)
# Baselines: train GMNPSVM with each kernel alone for comparison with MKL.
# Single kernel:PolyKernel
C=1
pk = sg.kernel('PolyKernel', degree=10, c=2)
svm=GMNPSVM(C, pk, labels)
_=svm.train(feats)
out=svm.apply(feats_rem)
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=figure(figsize=(17,6))
gray()
plot_example(Xbad, Ybad)
#Single Kernel:Gaussian kernel
width=15
C=1
gk=sg.kernel("GaussianKernel", log_width=np.log(width))
svm=GMNPSVM(C, gk, labels)
_=svm.train(feats)
out=svm.apply(feats_rem)
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=figure(figsize=(17,6))
gray()
plot_example(Xbad, Ybad)
"""
Explanation: We combine a Gaussian kernel and a PolyKernel. To test, examples not included in training data are used.
This is just a demonstration but we can see here how MKL is working behind the scene. What we have is two kernels with significantly different properties. The gaussian kernel defines a function space that is a lot larger than that of the linear kernel or the polynomial kernel. The gaussian kernel has a low width, so it will be able to represent more and more complex relationships between the training data. But it requires enough data to train on. The number of training examples here is 1000, which seems a bit less as total examples are 10000. We hope the polynomial kernel can counter this problem, since it will fit the polynomial for you using a lot less data than the squared exponential. The kernel weights are printed below to add some insight.
End of explanation
"""
# Two tight clusters (around +2 and -2) as "normal" data for novelty detection.
X = -0.3 * random.randn(100,2)
traindata=r_[X + 2, X - 2].T
X = -0.3 * random.randn(20, 2)
testdata = r_[X + 2, X - 2].T
# NOTE(review): 100 labels (99 ones, one -1) for 200 training columns --
# one-class training reads labels differently; confirm this is intended.
trainlab=concatenate((ones(99),-ones(1)))
#convert to shogun features and generate labels for data
feats=features(traindata)
labels=BinaryLabels(trainlab)
# Dense grid for decision-surface plots plus random out-of-cluster points.
xx, yy = meshgrid(linspace(-5, 5, 500), linspace(-5, 5, 500))
grid=features(array((ravel(xx), ravel(yy))))
#test features
feats_t=features(testdata)
x_out=(random.uniform(low=-4, high=4, size=(20, 2))).T
feats_out=features(x_out)
# Combined kernel/features: one feature object appended per subkernel.
kernel=CombinedKernel()
feats_train=CombinedFeatures()
feats_test=CombinedFeatures()
feats_test_out=CombinedFeatures()
feats_grid=CombinedFeatures()
#append gaussian kernel
subkernel=sg.kernel("GaussianKernel", log_width=np.log(8))
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_t)
feats_test_out.append_feature_obj(feats_out)
feats_grid.append_feature_obj(grid)
kernel.append_kernel(subkernel)
#append PolyKernel
feats = features(traindata)
subkernel = sg.kernel('PolyKernel', degree=10, c=3)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_t)
feats_test_out.append_feature_obj(feats_out)
feats_grid.append_feature_obj(grid)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
# One-class MKL configuration; interleaved optimization disabled.
mkl = MKLOneClass()
mkl.set_kernel(kernel)
mkl.set_labels(labels)
mkl.set_interleaved_optimization_enabled(False)
mkl.set_epsilon(1e-2)
mkl.put('mkl_epsilon', 0.1)
mkl.set_mkl_norm(1)
"""
Explanation: The misclassified examples are surely pretty tough to predict. As seen from the accuracy MKL seems to work a shade better in the case. One could try this out with more and different types of kernels too.
One-class classification using MKL
One-class classification can be done using MKL in shogun. This is demonstrated in the following simple example using MKLOneClass. We will see how abnormal data is detected. This is also known as novelty detection. Below we generate some toy data and initialize combined kernels and features.
End of explanation
"""
mkl.train()
print("Weights:")
w=kernel.get_subkernel_weights()
print(w)
#initialize with test features
kernel.init(feats_train, feats_test)
normal_out = mkl.apply()
#test on abnormally generated data
kernel.init(feats_train, feats_test_out)
abnormal_out = mkl.apply()
#test on X-Y grid
kernel.init(feats_train, feats_grid)
grid_out=mkl.apply()
# Decision values and hard labels over the grid, reshaped for plotting.
z=grid_out.get_values().reshape((500,500))
z_lab=grid_out.get_labels().reshape((500,500))
a=abnormal_out.get_labels()
n=normal_out.get_labels()
#check for normal and abnormal classified data
idx=where(normal_out.get_labels() != 1)[0]
abnormal=testdata[:,idx]
idx=where(normal_out.get_labels() == 1)[0]
normal=testdata[:,idx]
# Left: classified samples over the decision boundary; right: raw values.
figure(figsize(15,6))
pl =subplot(121)
title("One-class classification using MKL")
_=pink()
c=pcolor(xx, yy, z)
_=contour(xx, yy, z_lab, linewidths=1, colors='black', hold=True)
_=colorbar(c)
p1=pl.scatter(traindata[0, :], traindata[1,:], cmap=gray(), s=100)
p2=pl.scatter(normal[0,:], normal[1,:], c="red", s=100)
p3=pl.scatter(abnormal[0,:], abnormal[1,:], c="blue", s=100)
p4=pl.scatter(x_out[0,:], x_out[1,:], c=a, cmap=jet(), s=100)
_=pl.legend((p1, p2, p3), ["Training samples", "normal samples", "abnormal samples"], loc=2)
subplot(122)
c=pcolor(xx, yy, z)
title("One-class classification output")
_=gray()
_=contour(xx, yy, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
"""
Explanation: Now that everything is initialized, let's see MKLOneclass in action by applying it on the test data and on the X-Y grid.
End of explanation
"""
|
zrhans/python | exemplos/googlecode-day-python/google-python-class-day1-p3.ipynb | gpl-2.0 | # Criando um dicionario vazio
d = {}
# Adicionando elementos para chave-valor
d['a'] = 'alpha'
d['o'] = 'omega'
d['g'] = 'gamma'
# algumas propriedades uteis
d
#Exibindo as chaves
d.keys()
# Iterando sobre as chaves
for k in d.keys(): print 'Key:',k,'->',d[k]
#Exibindo os valores
d.values()
#Exibindo os itens
d.items()
"""
Explanation: Google Python Class Day 1 Part 3
Fonte: Youtube
Topico:
Hash table ou Dicionarios
Dicionarios
End of explanation
"""
# Iterate over the tuples representing each key-value pair.
for tup in d.items(): print(tup)
"""
Explanation: Observe que os itens sao apresentados na forma de Tuplas representando o par chave-valor**
End of explanation
"""
|
cielling/jupyternbs | analyze_stockdata_testing.ipynb | agpl-3.0 | import sqlite3
# Open a connection to the local EDGAR index database and get a cursor.
conn3 = sqlite3.connect('edgar_idx.db')
cursor=conn3.cursor()
"""
Explanation: Setting up for testing
Use sqlite3 to connect to the edgar_idx database
End of explanation
"""
ticker = "MMM"
"""
Explanation: Set the ticker and pull out the list of 10-Q's and 10-K's for it from the database. Save each to a CSV file.
End of explanation
"""
from MyEdgarDb import get_list_sec_filings, get_cik_ticker_lookup_db, lookup_cik_ticker
# Refresh the local copies of the SEC master index and the CIK<->ticker table.
print("Updating master index.")
get_list_sec_filings ()
print("Updating CIK-ticker lookup table.")
get_cik_ticker_lookup_db ()
#cursor.execute('''SELECT * FROM idx WHERE Symbol=?;''', ("ABBV",))
# Look up the CIK for the chosen ticker, then pull all of its index records.
cursor.execute('''SELECT * FROM cik_ticker_name WHERE ticker=?;''',(ticker,))
res=cursor.fetchall()
print(res)
# res[0][0] is the CIK from the first (expected only) lookup row.
cursor.execute('''SELECT * FROM idx WHERE cik=?;''', (res[0][0],))
recs = cursor.fetchall()
# Column names come from the cursor description of the last SELECT.
names = list(map(lambda x: x[0], cursor.description))
print(names)
df = pd.DataFrame(data=recs, columns=names)
df['date'] = pd.to_datetime(df['date'])
# Bare expressions below display in the notebook (columns, size, dtypes).
df.columns
df.size
df.dtypes
## Sort by date in descending order (most recent is first)
df.sort_values(by=['date'], inplace=True, ascending=False)
#print(type(recs))
print(recs)
#conn3.close()
# Write 10-Q and 10-K filings to per-type CSV catalogues (Windows paths).
df[df.type == "10-Q"].to_csv("TestData\\"+ticker.lower()+"_all_10qs.csv", index=None)
df[df.type == "10-K"].to_csv("TestData\\"+ticker.lower()+"_all_10ks.csv", index=None)
#df[df.type == "20-F"].to_csv("TestData\\"+ticker.lower()+"_all_20fs.csv", index=None)
"""
Explanation: If necessary, update the idx and cik_ticker_name tables in the database
End of explanation
"""
# Read the per-type CSV catalogues back in, parsing dates and keeping IDs as strings.
all_10Ks =pd.read_csv("TestData\\{:s}_all_10ks.csv".format(ticker.lower()), parse_dates=['date'], dtype={'cik':str, 'conm':str, 'type':str,'path':str})
all_10Qs =pd.read_csv("TestData\\{:s}_all_10qs.csv".format(ticker.lower()), parse_dates=['date'], dtype={'cik':str, 'conm':str, 'type':str,'path':str})
"""
Explanation: The thus created CSV files can be read back in
End of explanation
"""
from CanslimParams import CanslimParams
# Build the CANSLIM parameter object from the filing lists and load all filings.
canslim = CanslimParams(ticker, all_10Qs, all_10Ks)
canslim.loadData()
"""
Explanation: Verifying Canslim is correctly loaded/populated
First, create the Canslim object, which should load all SecFilings*
End of explanation
"""
# Diagnostics: number of 10-Ks/10-Qs loaded and any accumulated errors.
print(canslim.n10Ks)
print(canslim.n10Qs)
print(canslim.errorLog)
"""
Explanation: Print some diagnostics
End of explanation
"""
# Print EPS for every quarterly filing, then for every annual filing.
for quarter_idx in range(canslim.n10Qs):
    print(canslim.getEpsQuarter(quarter_idx))
for year_idx in range(canslim.n10Ks):
    print(canslim.getEpsAnnual(year_idx))
"""
Explanation: Print EPS's for all quarters and years
End of explanation
"""
# These lists are populated lazily, once the filings have been accessed.
print(canslim.quartersList)
print(canslim.yearsList)
"""
Explanation: After the SecFilings have actually been accessed (through getting the EPS values), the quartersList and yearsList should be populated now.
End of explanation
"""
ticker = "OSK" ## Note that this will overwrite the `ticker` set above
from os import path, getenv
#file_path = path.join("C:", "Users", "Carola", "jupyternbs", "SECDATA", "OSHKOSH CORP", "775158_OSHKOSH CORP_10-Q_2019-08-000000")
# Absolute Windows path to a single saved 10-Q filing for Oshkosh Corp.
file_path = "C:\\Users\\Carola\\jupyternbs\\SECDATA\\OSHKOSH CORP\\775158_OSHKOSH CORP_10-Q_2019-08-000000"
from SecFiling10Q import SecFiling10Q
# Parse the filing from disk.
filing = SecFiling10Q(ticker)
filing.load(file_path)
"""
Explanation: Verifying that a 10Q filing is loaded correctly
First, set the correct path to the filing to load (be sure to update the last two arguments, if necessary).
Then, load the SecFiling.
End of explanation
"""
# Spot-check values parsed from the filing.
print(filing.getReportDate())
print("Eps=", filing.getEps())
"""
Explanation: Now we can print some things that should be loaded from the filing.
End of explanation
"""
# Inspect the container type, then print every tag whose name contains the
# target us-gaap element (case-insensitive), followed by the match count.
type(filing.all_tags)
target = 'us-gaap:IncomeLossFromContinuingOperationsPerBasicShare'.lower()
matching_tags = [tag for tag in filing.all_tags if target in tag.name.lower()]
for tag in matching_tags:
    print(tag)
print(len(matching_tags))
"""
Explanation: Print some diagnostics (all_tags contains the section from the filing-file that should contain all the relevant tags)
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
from datetime import date, timedelta
"""
Explanation: Verifying calculated data
End of explanation
"""
## NVDA
#y= np.array([3123000000, 3207000000, 2911000000, 2636000000, 2230000000]) #sales
#y= np.array([1.81, 2.05, 1.86, 1.39, 0.98]) #eps
#x=np.array([0.0, -91, -182, -273, -364])
## AAPL
#se = np.array([114949, 126878, 140199]) * 1000.0
#ni = np.array([11519, 13822, 20065 ]) * 1000000.0
#sales = np.array([53265, 61137, 88293, 52579, 45408, 52896, 78351]) * 1000000.0
#eps = np.array([2.36, 2.75, 3.92, 2.18, 1.68, 2.11, 3.38])
## ACLS
#se= np.array([397074, 385614, 371527])*1000.0
#ni = np.array([8838, 14669, 13915])*1000.0
#sales = np.array([95374, 119333, 122185])*1000.0
#eps = np.array([0.95, 2.26, 1.71])
#days = np.array([0.0, -91, -182, -273, -364, -455, -546])
## MMM
# Quarterly EPS, report dates, sales, shareholder equity (se) and net income
# (ni) for 3M, most recent quarter first (hand-copied from the filings).
eps = np.array([1.54, 2.3, 2.64, 3.14, 1.01, 0.88, 2.39, 2.65, 2.21, 1.92, 2.2, 2.13, 2.1, 1.69, 2.09, 2.06, 1.88])
dates=np.array([date(2019, 3, 31), date(2018, 12, 31), date(2018, 9, 30), date(2018, 6, 30), date(2018, 3, 31), date(2017, 12, 31), date(2017, 9, 30), date(2017, 6, 30), date(2017, 3, 31), date(2016, 12, 31), date(2016, 9, 30), date(2016, 6, 30), date(2016, 3, 31), date(2015, 12, 31), date(2015, 9, 30), date(2015, 6, 30), date(2015, 3, 31)])
sales=np.array([7863000000, 7945000000, 8152000000, 8390000000, 8278000000, 8008000000, 8172000000, 7810000000, 7685000000, 7329000000, 7709000000, 7662000000, 7409000000, 7298000000, 7712000000, 7686000000, 7578000000])
se=np.array([9703000000, 10407000000, 10248000000, 10365000000, 10977000000, 11672000000, 12146000000, 11591000000, 10989000000, 11316000000, 12002000000, 11894000000, 11733000000, 12484000000, 12186000000, 13093000000, 13917000000])
ni=np.array([891000000, 1361000000, 1543000000, 1857000000, 602000000, 534000000, 1429000000, 1583000000, 1323000000, 1163000000, 1329000000, 1291000000, 1275000000, 1041000000, 1296000000, 1303000000, 1201000000])
# Days elapsed relative to the most recent report date (non-positive values).
delta=dates-dates[0]
l=[]
for d in delta:
    l.append(d.days)
days=np.array(l)
# NOTE(review): sum/mean over the same four values is always exactly 4.0, so
# this expression carries no information about the data — verify its intent.
np.sum(eps[0:4]) / np.average(eps[0:4])
"""
Explanation: Verifying "Stability of EPS Growth"
First, enter all the values into numpy-arrays. Hint: if these are in an Excel spreadsheet, one can copy+paste values only, then copy+paste transpose. Then, one can copy+paste these into notepad++ and find+replace tabs with commas, etc. Then paste here.
For dates: format in Excel spreadsheet. in Notepad++: search for ([0-9]*-[0-9]*-[0-9]*), replace with date\(\1\)
End of explanation
"""
# Fit all but the last five (oldest) quarters.
y = eps[:-5]
x = days[:-5]
## Fit a polynomial of degree 2 through the data: ax**2 + bx + c. 'a' should be the acceleration
p = np.polyfit(x, y, 2)
## Calculate fitted y-values
yfit = np.polyval(p, x)
## Calculate the "error"
# Relative residual per point; squaring gives a scale-free goodness-of-fit.
sigma = (y - yfit) / y
error = sigma * sigma
res = error.sum()
print("Total error (sum(sigma_i^2)): {:g}".format(res))
plt.plot(x,y)
plt.plot(x,yfit)
plt.show()
print(p)
print(error)
print(res)
# Finite-difference growth rates between consecutive reports (per day).
print((sales[2] - sales[1])/(days[2] - days[1]))
print((eps[2] - eps[1])/(days[2] - days[1]))
print((y[2] - y[0])/(x[2] - x[0]))
"""
Explanation: Then, the data can be fitted. Set x and y to the appropriate data (ranges) to be fitted.
End of explanation
"""
|
aldian/tensorflow | tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
!pip install tflite-model-maker
"""
Explanation: Text classification with TensorFlow Lite Model Maker
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/tutorials/model_maker_text_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
The TensorFlow Lite Model Maker library simplifies the process of adapting and converting a TensorFlow model to particular input data when deploying this model for on-device ML applications.
This notebook shows an end-to-end example that utilizes the Model Maker library to illustrate the adaptation and conversion of a commonly-used text classification model to classify movie reviews on a mobile device. The text classification model classifies text into predefined categories. The inputs should be preprocessed text and the outputs are the probabilities of the categories. The dataset used in this tutorial are positive and negative movie reviews.
Prerequisites
Install the required packages
To run this example, install the required packages, including the Model Maker package from the GitHub repo.
End of explanation
"""
import numpy as np
import os
import tensorflow as tf
assert tf.__version__.startswith('2')
from tflite_model_maker import configs
from tflite_model_maker import model_spec
from tflite_model_maker import text_classifier
from tflite_model_maker import TextClassifierDataLoader
"""
Explanation: Import the required packages.
End of explanation
"""
# Download and extract the SST-2 sentiment dataset; data_dir ends up pointing
# at the extracted 'SST-2' folder next to the downloaded archive.
data_dir = tf.keras.utils.get_file(
    fname='SST-2.zip',
    origin='https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8',
    extract=True)
data_dir = os.path.join(os.path.dirname(data_dir), 'SST-2')
"""
Explanation: Get the data path
Download the dataset for this tutorial.
End of explanation
"""
spec = model_spec.get('mobilebert_classifier')
"""
Explanation: You can also upload your own dataset to work through this tutorial. Upload your dataset by using the left sidebar in Colab.
<img src="https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_text_classification.png" alt="Upload File" width="800" hspace="100">
If you prefer not to upload your dataset to the cloud, you can also locally run the library by following the guide.
End-to-End Workflow
This workflow consists of five steps as outlined below:
Step 1. Choose a model specification that represents a text classification model.
This tutorial uses MobileBERT as an example.
End of explanation
"""
# Step 2: load the SST-2 train/dev splits, preprocessing each sentence
# according to the chosen model spec.
# NOTE(review): the outer os.path.join wrapper is redundant (single argument).
train_data = TextClassifierDataLoader.from_csv(
    filename=os.path.join(os.path.join(data_dir, 'train.tsv')),
    text_column='sentence',
    label_column='label',
    model_spec=spec,
    delimiter='\t',
    is_training=True)
test_data = TextClassifierDataLoader.from_csv(
    filename=os.path.join(os.path.join(data_dir, 'dev.tsv')),
    text_column='sentence',
    label_column='label',
    model_spec=spec,
    delimiter='\t',
    is_training=False)
"""
Explanation: Step 2. Load train and test data specific to an on-device ML app and preprocess the data according to a specific model_spec.
End of explanation
"""
model = text_classifier.create(train_data, model_spec=spec)
"""
Explanation: Step 3. Customize the TensorFlow model.
End of explanation
"""
loss, acc = model.evaluate(test_data)
"""
Explanation: Step 4. Evaluate the model.
End of explanation
"""
# Step 5: export a dynamic-range-quantized TFLite model.
config = configs.QuantizationConfig.create_dynamic_range_quantization(optimizations=[tf.lite.Optimize.OPTIMIZE_FOR_LATENCY])
# NOTE(review): this pokes a private attribute of QuantizationConfig and may
# break with newer tflite-model-maker releases — confirm before upgrading.
config._experimental_new_quantizer = True
model.export(export_dir='mobilebert/', quantization_config=config)
"""
Explanation: Step 5. Export as a TensorFlow Lite model.
Since MobileBERT is too big for on-device applications, use dynamic range quantization on the model to compress it by almost 4x with minimal performance degradation.
End of explanation
"""
spec = model_spec.get('average_word_vec')
"""
Explanation: You can also download the model using the left sidebar in Colab.
After executing the 5 steps above, you can further use the TensorFlow Lite model file and label file in on-device applications like in a text classification reference app.
The following sections walk through the example step by step to show more detail.
Choose a model_spec that Represents a Model for Text Classifier
Each model_spec object represents a specific model for the text classifier. TensorFlow Lite Model Maker currently supports MobileBERT, averaging word embeddings and [BERT-Base](https://arxiv.org/pdf/1810.04805.pdf) models.
Supported Model | Name of model_spec | Model Description
--- | --- | ---
MobileBERT | 'mobilebert_classifier' | 4.3x smaller and 5.5x faster than BERT-Base while achieving competitive results, suitable for on-device applications.
BERT-Base | 'bert_classifier' | Standard BERT model that is widely used in NLP tasks.
averaging word embedding | 'average_word_vec' | Averaging text word embeddings with RELU activation.
This tutorial uses a smaller model, average_word_vec that you can retrain multiple times to demonstrate the process.
End of explanation
"""
# Re-download/extract SST-2 (same as earlier in the notebook); get_file caches
# the archive, so this is cheap when it has already been downloaded.
data_dir = tf.keras.utils.get_file(
    fname='SST-2.zip',
    origin='https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8',
    extract=True)
data_dir = os.path.join(os.path.dirname(data_dir), 'SST-2')
"""
Explanation: Load Input Data Specific to an On-device ML App
The SST-2 (Stanford Sentiment Treebank) is one of the tasks in the GLUE benchmark . It contains 67,349 movie reviews for training and 872 movie reviews for validation. The dataset has two classes: positive and negative movie reviews.
Download the archived version of the dataset and extract it.
End of explanation
"""
# Reload the train/dev splits against the current (average_word_vec) spec.
# NOTE(review): the outer os.path.join wrapper is redundant (single argument).
train_data = TextClassifierDataLoader.from_csv(
    filename=os.path.join(os.path.join(data_dir, 'train.tsv')),
    text_column='sentence',
    label_column='label',
    model_spec=spec,
    delimiter='\t',
    is_training=True)
test_data = TextClassifierDataLoader.from_csv(
    filename=os.path.join(os.path.join(data_dir, 'dev.tsv')),
    text_column='sentence',
    label_column='label',
    model_spec=spec,
    delimiter='\t',
    is_training=False)
"""
Explanation: The SST-2 dataset has train.tsv for training and dev.tsv for validation. The files have the following format:
sentence | label
--- | ---
it 's a charming and often affecting journey . | 1
unflinchingly bleak and desperate | 0
A positive review is labeled 1 and a negative review is labeled 0.
Use the TestClassifierDataLoader.from_csv method to load the data.
End of explanation
"""
model = text_classifier.create(train_data, model_spec=spec, epochs=10)
"""
Explanation: The Model Maker library also supports the from_folder() method to load data. It assumes that the text data of the same class are in the same subdirectory and that the subfolder name is the class name. Each text file contains one movie review sample. The class_labels parameter is used to specify which of the subfolders to load.
Customize the TensorFlow Model
Create a custom text classifier model based on the loaded data.
End of explanation
"""
model.summary()
"""
Explanation: Examine the detailed model structure.
End of explanation
"""
loss, acc = model.evaluate(test_data)
"""
Explanation: Evaluate the Customized Model
Evaluate the result of the model and get the loss and accuracy of the model.
Evaluate the loss and accuracy in the test data.
End of explanation
"""
model.export(export_dir='average_word_vec/')
"""
Explanation: Export as a TensorFlow Lite Model
Convert the existing model to TensorFlow Lite model format that you can later use in an on-device ML application. Save the text labels in a label file and vocabulary in a vocab file. The default TFLite filename is model.tflite, the default label filename is label.txt and the default vocab filename is vocab.
End of explanation
"""
model.evaluate_tflite('average_word_vec/model.tflite', test_data)
"""
Explanation: The TensorFlow Lite model file and label file can be used in the text classification reference app by adding model.tflite, text_label.txt and vocab.txt to the assets directory. Do not forget to also change the filenames in the code.
You can evaluate the TFLite model with the evaluate_tflite method.
End of explanation
"""
new_model_spec = model_spec.AverageWordVecModelSpec(wordvec_dim=32)
"""
Explanation: Advanced Usage
The create function is the driver function that the Model Maker library uses to create models. The model spec parameter defines the model specification. The AverageWordVecModelSpec and BertClassifierModelSpec classes are currently supported. The create function comprises of the following steps:
Creates the model for the text classifier according to model_spec.
Trains the classifier model. The default epochs and the default batch size are set by the default_training_epochs and default_batch_size variables in the model_spec object.
This section covers advanced usage topics like adjusting the model and the training hyperparameters.
Adjust the model
You can adjust the model infrastructure like the wordvec_dim and the seq_len variables in the AverageWordVecModelSpec class.
For example, you can train the model with a larger value of wordvec_dim. Note that you must construct a new model_spec if you modify the model.
End of explanation
"""
# Re-preprocess the training data against the modified spec.
new_train_data = TextClassifierDataLoader.from_csv(
    filename=os.path.join(os.path.join(data_dir, 'train.tsv')),
    text_column='sentence',
    label_column='label',
    model_spec=new_model_spec,
    delimiter='\t',
    is_training=True)
"""
Explanation: Get the preprocessed data.
End of explanation
"""
model = text_classifier.create(new_train_data, model_spec=new_model_spec)
"""
Explanation: Train the new model.
End of explanation
"""
# MobileBERT spec with a 256-token input sequence so longer reviews are not
# truncated as aggressively.
new_model_spec = model_spec.get('mobilebert_classifier')
new_model_spec.seq_len = 256
"""
Explanation: You can also adjust the MobileBERT model.
The model parameters you can adjust are:
seq_len: Length of the sequence to feed into the model.
initializer_range: The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
trainable: Boolean that specifies whether the pre-trained layer is trainable.
The training pipeline parameters you can adjust are:
model_dir: The location of the model checkpoint files. If not set, a temporary directory will be used.
dropout_rate: The dropout rate.
learning_rate: The initial learning rate for the Adam optimizer.
tpu: TPU address to connect to.
For instance, you can set the seq_len=256 (default is 128). This allows the model to classify longer text.
End of explanation
"""
model = text_classifier.create(train_data, model_spec=spec, epochs=20)
"""
Explanation: Tune the training hyperparameters
You can also tune the training hyperparameters like epochs and batch_size that affect the model accuracy. For instance,
epochs: more epochs could achieve better accuracy, but may lead to overfitting.
batch_size: the number of samples to use in one training step.
For example, you can train with more epochs.
End of explanation
"""
loss, accuracy = model.evaluate(test_data)
"""
Explanation: Evaluate the newly retrained model with 20 training epochs.
End of explanation
"""
spec = model_spec.get('bert_classifier')
"""
Explanation: Change the Model Architecture
You can change the model by changing the model_spec. The following shows how to change to BERT-Base model.
Change the model_spec to BERT-Base model for the text classifier.
End of explanation
"""
|
diegocavalca/Studies | phd-thesis/nilmtk/disaggregation_and_metrics.ipynb | cc0-1.0 | from __future__ import print_function, division
import time
from matplotlib import rcParams
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from six import iteritems
from nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore
from nilmtk.disaggregate import CombinatorialOptimisation, FHMM
import nilmtk.utils
%matplotlib inline
import warnings
# Silence library warnings and use a wide default figure size for all plots.
warnings.filterwarnings("ignore")
rcParams['figure.figsize'] = (13, 6)
"""
Explanation: Disaggregation
End of explanation
"""
# Open the REDD dataset twice; the time windows set below turn one handle into
# the train split and the other into the test split.
train = DataSet('../datasets/REDD/low_freq.h5')
test = DataSet('../datasets/REDD/low_freq.h5')
"""
Explanation: Dividing data into train and test set
End of explanation
"""
building = 1
"""
Explanation: Let us use building 1 for demo purposes
End of explanation
"""
# Split at 2011-04-30: everything before is train, everything after is test.
train.set_window(end="2011-04-30")
test.set_window(start="2011-04-30")
train_elec = train.buildings[1].elec
test_elec = test.buildings[1].elec
"""
Explanation: Let's split data at April 30th
End of explanation
"""
# Plot the aggregate (mains) and per-appliance (submeter) training data.
print(' TRAIN MAINS')
train_elec.mains().plot();
print(' TRAIN APPLIANCES')
train_elec.submeters().plot();
"""
Explanation: Visualizing the data
Train
End of explanation
"""
# Plot the aggregate (mains) and per-appliance (submeter) test data.
print(' TEST MAINS')
test_elec.mains().plot();
print(' TEST APPLIANCES')
test_elec.submeters().plot();
"""
Explanation: Test
End of explanation
"""
# Load one chunk of fridge data and one of mains data to inspect their
# respective sample rates.
fridge_meter = train_elec['fridge']
fridge_df = next(fridge_meter.load())
fridge_df.head()
mains = train_elec.mains()
mains_df = next(mains.load())
mains_df.head()
"""
Explanation: REDD data set has got appliance level data sampled every 3 or 4 seconds and mains data sampled every 1 second. Let us verify the same.
End of explanation
"""
# Keep only the five submeters with the highest energy consumption.
top_5_train_elec = train_elec.submeters().select_top_k(k=5)
top_5_train_elec
"""
Explanation: Since, both of these are sampled at different frequencies, we will downsample both to 1 minute resolution. We will also select the top-5 appliances in terms of energy consumption and use them for training our FHMM and CO models.
Selecting top-5 appliances
End of explanation
"""
def predict(clf, test_elec, sample_period, timezone):
    """Disaggregate the test mains with a trained classifier.

    Parameters
    ----------
    clf : trained NILMTK disaggregator (e.g. CombinatorialOptimisation, FHMM)
    test_elec : MeterGroup holding the test mains and submeters
    sample_period : int, resample period in seconds
    timezone : str, timezone the returned indices are converted to

    Returns
    -------
    (gt_overall, pred_overall) : pair of DataFrames (ground truth, predictions)
        restricted to the timestamps common to both, one column per appliance.
    """
    pred = {}
    gt = {}
    # "ac_type" varies according to the dataset used.
    # Make sure to use the correct ac_type before using the default parameters in this code.
    for i, chunk in enumerate(test_elec.mains().load(physical_quantity='power', ac_type='apparent', sample_period=sample_period)):
        chunk_drop_na = chunk.dropna()
        pred[i] = clf.disaggregate_chunk(chunk_drop_na)
        gt[i] = {}
        for meter in test_elec.submeters().meters:
            # Only use the meters that we trained on (this saves time!)
            gt[i][meter] = next(meter.load(physical_quantity='power', ac_type='active', sample_period=sample_period))
        # Collapse the per-meter Series into one DataFrame per chunk, skipping
        # meters that returned no data.  (Py3 .items() replaces six.iteritems.)
        gt[i] = pd.DataFrame({k: v.squeeze() for k, v in gt[i].items() if len(v)},
                             index=next(iter(gt[i].values())).index).dropna()

    # If everything can fit in memory: stack all chunks, drop the chunk level.
    gt_overall = pd.concat(gt)
    gt_overall.index = gt_overall.index.droplevel()
    pred_overall = pd.concat(pred)
    pred_overall.index = pred_overall.index.droplevel()

    # Put the ground-truth columns in the same order as the predictions.
    gt_overall = gt_overall[pred_overall.columns]

    # Restrict both frames to their common timestamps (compare in UTC to avoid
    # DST ambiguities, then convert back to the local timezone).
    gt_index_utc = gt_overall.index.tz_convert("UTC")
    pred_index_utc = pred_overall.index.tz_convert("UTC")
    common_index_utc = gt_index_utc.intersection(pred_index_utc)
    common_index_local = common_index_utc.tz_convert(timezone)
    gt_overall = gt_overall.loc[common_index_local]
    pred_overall = pred_overall.loc[common_index_local]

    # Give both frames an identical plain-list column index of meter objects.
    appliance_labels = [m for m in gt_overall.columns.values]
    gt_overall.columns = appliance_labels
    pred_overall.columns = appliance_labels
    return gt_overall, pred_overall
"""
Explanation: Training and disaggregation
A function to disaggregate the mains data to constituent appliances and return the predictions
End of explanation
"""
# Benchmark two disaggregation algorithms on the same top-5 training data.
classifiers = {'CO':CombinatorialOptimisation(), 'FHMM':FHMM()}
predictions = {}
sample_period = 120
for clf_name, clf in classifiers.items():
    print("*"*20)
    print(clf_name)
    print("*" *20)
    start = time.time()
    # The sample period downsamples the data to 2-minute resolution (120 s).
    # To train on all appliances instead of the top 5, use:
    # clf.train(train_elec, sample_period=sample_period)
    clf.train(top_5_train_elec, sample_period=sample_period)
    end = time.time()
    print("Runtime =", end-start, "seconds.")
    gt, predictions[clf_name] = predict(clf, test_elec, sample_period, train.metadata['timezone'])
"""
Explanation: Train using 2 benchmarking algorithms - Combinatorial Optimisation (CO) and Factorial Hidden Markov Model (FHMM)
End of explanation
"""
# Replace meter-object column headers with human-readable labels.
appliance_labels = [m.label() for m in gt.columns.values]
gt.columns = appliance_labels
predictions['CO'].columns = appliance_labels
predictions['FHMM'].columns = appliance_labels
"""
Explanation: Using prettier labels!
End of explanation
"""
# Preview the ground truth and both prediction sets (first five rows each).
gt.head()
predictions['CO'].head()
predictions['FHMM'].head()
"""
Explanation: Taking a look at the ground truth of top 5 appliance power consumption
End of explanation
"""
# Overlay predicted vs. ground-truth fridge power for the first 300 samples,
# once per algorithm.
predictions['CO']['Fridge'].head(300).plot(label="Pred")
gt['Fridge'].head(300).plot(label="GT")
plt.legend()
predictions['FHMM']['Fridge'].head(300).plot(label="Pred")
gt['Fridge'].head(300).plot(label="GT")
plt.legend()
"""
Explanation: Plotting the predictions against the actual usage
End of explanation
"""
# '?' is IPython help magic: show the docstring of compute_rmse.
? nilmtk.utils.compute_rmse
# Per-appliance RMSE for each algorithm, collected into one DataFrame.
rmse = {}
for clf_name in classifiers.keys():
    rmse[clf_name] = nilmtk.utils.compute_rmse(gt, predictions[clf_name])
rmse = pd.DataFrame(rmse)
rmse
"""
Explanation: Comparing NILM algorithms (CO vs FHMM)
nilmtk.utils.compute_rmse is an extended of the following, handling both missing values and labels better:
python
def compute_rmse(gt, pred):
from sklearn.metrics import mean_squared_error
rms_error = {}
for appliance in gt.columns:
rms_error[appliance] = np.sqrt(mean_squared_error(gt[appliance], pred[appliance]))
return pd.Series(rms_error)
End of explanation
"""
|
ffpenaloza/AstroExp | tarea5/tarea5.ipynb | gpl-3.0 | from astropy.io import fits
import numpy as np
# Open the two HST/ACS drizzled images (F475W and F850LP filters).
f475 = fits.open('hst_9401_02_acs_wfc_f475w_drz.fits')
f850 = fits.open('hst_9401_02_acs_wfc_f850lp_drz.fits')
# Write extension 1 (science image) and extension 2 (inverse-variance map) to
# standalone files.  NOTE(review): 'clobber' is deprecated in newer astropy
# in favour of 'overwrite' — confirm against the installed version.
f475[1].writeto('sci_f475w_m87.fits',clobber=True)
f475[2].writeto('invvar_f475w_m87.fits',clobber=True)
f850[1].writeto('sci_f850lp_m87.fits',clobber=True)
f850[2].writeto('invvar_f850lp_m87.fits',clobber=True)
f475.close()
f850.close()
# Run SExtractor on each science image (IPython shell escape).
!sextractor sci_f475w_m87.fits -c f475w.sex
!sextractor sci_f850lp_m87.fits -c f850lp.sex
"""
Explanation: Tarea 5
Luego de descargar las imágenes en los filtros F475W y F850LP del objeto VCC1316 (M87) se siguen los pasos de la primera tarea para generar el catálogo.
De Sirianni et. al (2005) se obtiene la escala de WFC (0.05''/px) y los zeropoint en el sistema AB (según la tabla 10) y se ejecuta Sextractor.
Se corrige por apertura según la tabla 3 (2 pixeles de radio a escala de 0.05''/px corresponden a 0.1'').
Se corrige por reddening (para un SED tipo E) según la tabla 14 y el valor (B-V) de NED
End of explanation
"""
from astropy import units as u
from astropy.coordinates import SkyCoord
# Load RA/DEC (degrees; catalogue columns 3 and 4) for each filter.
RA475 = np.loadtxt('f475w.cat', usecols=(3,))
DE475 = np.loadtxt('f475w.cat', usecols=(4,))
RA850 = np.loadtxt('f850lp.cat', usecols=(3,))
DE850 = np.loadtxt('f850lp.cat', usecols=(4,))
# Sky match with astropy; the f850lp catalogue has more objects, so match each
# f475w source to its nearest f850lp counterpart.
c = SkyCoord(ra=RA475*u.degree, dec=DE475*u.degree)
catalog = SkyCoord(ra=RA850*u.degree, dec=DE850*u.degree)
idx = c.match_to_catalog_sky(catalog)
# Rows of f850lp.cat indicated by the match.
matches = list(idx[0])
f475w = np.loadtxt('f475w.cat')
f850lp = np.loadtxt('f850lp.cat')
out = []
# Reddening colour excess E(B-V).
BV = 0.083-0.063
for j, i in enumerate(matches):
    row475 = f475w[j].copy()
    row850 = f850lp[i].copy()
    # BUG FIX: the aperture (2.5*log10 of the encircled-energy fraction) and
    # reddening corrections were previously added to EVERY column of the row,
    # corrupting the ID, magnitude-error and RA/DEC columns in the output.
    # They apply to the magnitude column (index 1) only.
    row475[1] += 2.5*np.log10(0.669) - 3.591*BV
    row850[1] += 2.5*np.log10(0.538) - 1.472*BV
    out.append(np.concatenate([row475, row850]))
# Write the matched, corrected catalogue.
# (BUG FIX: last header column previously read 'f814wDELTA'.)
np.savetxt('m87_match_f475w_f850lp.cat', out,
           fmt='%d\t%.4f\t%.4f\t%.7f\t%.7f\t%d\t%.4f\t%.4f\t%.7f\t%.7f',
           header='f475wN\tf475wMAG\tf475wMAGERR\tf475wALPHA\tf475wDELTA\tf850lpN\tf850lpMAG\tf850lpMAGERR\tf850lpALPHA\tf850lpDELTA')
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
from astropy.io import ascii
# Read the matched two-filter catalogue produced above.
tbl = ascii.read('m87_match_f475w_f850lp.cat')
# Colour distribution of the matched sources.
plt.figure(figsize=(10,10))
plt.hist(tbl["f475wMAG"] - tbl["f850lpMAG"], bins=220)
plt.xlabel("$m_{F475W} - m_{F850LP}$", fontsize=20)
plt.ylabel("N", fontsize=20)
plt.xlim(0, 2)
plt.show()
plt.close()
# Magnitude histograms per filter; the vertical line marks m = 24.3, the
# turnover magnitude read off the distribution.
plt.figure(figsize=(10,10))
plt.hist(tbl["f475wMAG"], histtype = 'step', color='b',label='$mF475W$',bins=50)
plt.hist(tbl["f850lpMAG"], histtype = 'step', color='r',label='$mF850LP$',bins=50)
plt.legend()
plt.xticks(list(plt.xticks()[0]) + [24.3])
plt.axvline(x=24.3,linewidth=2, color='g')
plt.xlabel("$m_{F475W}, m_{F850LP}$", fontsize=20)
plt.ylabel("N", fontsize=20)
plt.show()
plt.close()
"""
Explanation: Se permitió que Sextractor quitara la galaxia según la estimación del cielo. Se deja la check image (-background) como ejemplo de resultado. Funciona bien excepto para el jet y para el centro de la galaxia. Las detecciones están gobernadas por cúmulos globulares.
<img src="ds9.jpeg" width="500">
End of explanation
"""
# Distance modulus from the globular-cluster luminosity function: apparent
# magnitude m read off the histogram above, absolute magnitude M = -8.4.
m = 24.3
dm = m+8.4
# Python 2 print statements: distance modulus, then distance in parsecs
# via d = 10^((m-M+5)/5).
print dm
print 10**((dm+5)/5)
"""
Explanation: Para encontrar la distancia se recurre al dato de magnitud absoluta esperada de $-8.4$ (Jordán et al (2006)) y la magnitud aparente obtenida del histograma $24.3$
End of explanation
"""
!sextractor chandra.fits -c chandra.sex
RAchan = np.loadtxt('chandra.cat',usecols=(3,))
DEchan = np.loadtxt('chandra.cat',usecols=(4,))
# Match por parte de astropy. El catalogo de chandra contiene menos objetos
c = SkyCoord(ra=RAchan*u.degree, dec=DEchan*u.degree)
catalog = SkyCoord(ra=RA850*u.degree, dec=DE850*u.degree)
idx = c.match_to_catalog_sky(catalog)
# Del catalogo f850lp.cat se extraen las filas que indica el match
matches = list(idx[0])
f850lp = np.loadtxt('f850lp.cat')
chandra = np.loadtxt('chandra.cat')
out = []
j = 0
for i in matches:
out.append(np.concatenate(
[chandra[j],
f850lp[i]+ 2.5*np.log10(0.538)- (1.472*BV)]))
j = j+1
# Salida a archivo
np.savetxt('match_chandra.cat',out,
fmt='%d\t%.4f\t%.4f\t%.7f\t%.7f\t%d\t%.4f\t%.4f\t%.7f\t%.7f',
header='f475wN\tf475wMAG\tf475wMAGERR\tf475wALPHA\tf475wDELTA\tf850lpN\tf850lpMAG\tf850lpMAGERR\tf850lpALPHA\tf814wDELTA')
"""
Explanation: Se obtiene que la distancia es 34.67 Mpc. Dada la distancia conocida (16.5 Mpc) se esperaría una magnitud aparente de 22.09 magnitudes. Se infiere que hubo algún error en la calibración que provocó que se obtuviera una distancia de más del doble de lo esperado.
Chandra
Se descarga una imagen con 98.55ks de exposición desde el archivo de Chandra:
<img src="chandra.jpeg" width="500">
Se ejecuta sextractor en la imagen de Chandra, sin muchas configuraciones ni se hacen calibraciones de magnitud en vista de que solo se pretende saber si hay matches. De los 55 objetos detectados por sextractor todos tienen match en el catálogo del filtro F850W.
End of explanation
"""
|
mne-tools/mne-tools.github.io | stable/_downloads/d8a6d02146c5c075611a652218e020ad/30_reading_fnirs_data.ipynb | bsd-3-clause | import os.path as op
import numpy as np
import pandas as pd
import mne
"""
Explanation: Importing data from fNIRS devices
fNIRS devices consist of two kinds of optodes: light sources (AKA "emitters" or
"transmitters") and light detectors (AKA "receivers"). Channels are defined as
source-detector pairs, and channel locations are defined as the midpoint
between source and detector.
MNE-Python provides functions for reading fNIRS data and optode locations from
several file formats. Regardless of the device manufacturer or file format,
MNE-Python's fNIRS functions will internally store the measurement data and its
metadata in the same way (e.g., data values are always converted into SI
units). Supported measurement types include amplitude, optical density,
oxyhaemoglobin concentration, and deoxyhemoglobin concentration (for continuous
wave fNIRS), and additionally AC amplitude and phase (for
frequency domain fNIRS).
<div class="alert alert-danger"><h4>Warning</h4><p>MNE-Python stores metadata internally with a specific structure,
and internal functions expect specific naming conventions.
Manual modification of channel names and metadata
is not recommended.</p></div>
Standardized data
SNIRF (.snirf)
The Shared Near Infrared Spectroscopy Format
(SNIRF_)
is designed by the fNIRS community in an effort to facilitate
sharing and analysis of fNIRS data, and it is the official format of the
Society for functional near-infrared spectroscopy (SfNIRS).
The manufacturers NIRx, Kernel, and Cortivision export data in the SNIRF
format, and these files can be imported in to MNE.
SNIRF is the preferred format for reading data in to MNE-Python.
Data stored in the SNIRF format can be read in
using :func:mne.io.read_raw_snirf.
<div class="alert alert-info"><h4>Note</h4><p>The SNIRF format has provisions for many different types of fNIRS
recordings. MNE-Python currently only supports reading continuous
wave or haemoglobin data stored in the .snirf format.</p></div>
Specifying the coordinate system
There are a variety of coordinate systems used to specify the location of
sensors (see tut-source-alignment for details). Where possible the
coordinate system will be determined automatically when reading a SNIRF file.
However, sometimes this is not possible and you must manually specify the
coordinate frame the optodes are in. This is done using the optode_frame
argument when loading data.
======= ================== =================
Vendor Model optode_frame
======= ================== =================
NIRx ICBM-152 MNI mri
Kernel ICBM 2009b mri
======= ================== =================
Continuous Wave Devices
NIRx (directory or hdr)
NIRx produce continuous wave fNIRS devices.
NIRx recordings can be read in using :func:mne.io.read_raw_nirx.
The NIRx device stores data directly to a directory with multiple file types,
MNE-Python extracts the appropriate information from each file.
MNE-Python only supports NIRx files recorded with NIRStar
version 15.0 and above and Aurora version 2021 and above.
MNE-Python supports reading data from NIRScout and NIRSport devices.
Hitachi (.csv)
Hitachi produce continuous wave fNIRS devices.
Hitachi fNIRS recordings can be read using :func:mne.io.read_raw_hitachi.
No optode information is stored so you'll need to set the montage manually,
see the Notes section of :func:mne.io.read_raw_hitachi.
Frequency Domain Devices
BOXY (.txt)
BOXY recordings can be read in using :func:mne.io.read_raw_boxy.
The BOXY software and ISS Imagent I and II devices are frequency domain
systems that store data in a single .txt file containing what they call
(with MNE-Python's name for that type of data in parens):
DC
All light collected by the detector (fnirs_cw_amplitude)
AC
High-frequency modulated light intensity (fnirs_fd_ac_amplitude)
Phase
Phase of the modulated light (fnirs_fd_phase)
DC data is stored as the type fnirs_cw_amplitude because it
collects both the modulated and any unmodulated light, and hence is analogous
to what is collected by continuous wave systems such as NIRx. This helps with
conformance to SNIRF standard types.
These raw data files can be saved by the acquisition devices as parsed or
unparsed .txt files, which affects how the data in the file is organised.
MNE-Python will read either file type and extract the raw DC, AC,
and Phase data. If triggers are sent using the digaux port of the
recording hardware, MNE-Python will also read the digaux data and
create annotations for any triggers.
Custom Data Import
Loading legacy data in CSV or TSV format
<div class="alert alert-danger"><h4>Warning</h4><p>This method is not supported and users are discouraged to use it.
You should convert your data to the
[SNIRF](https://github.com/fNIRS/snirf) format using the tools
provided by the Society for functional Near-Infrared Spectroscopy,
and then load it using :func:`mne.io.read_raw_snirf`.</p></div>
fNIRS measurements may be stored in a non-standardised format that is not
supported by MNE-Python and cannot be converted easily into SNIRF.
This legacy data is often in CSV or TSV format,
we show here a way to load it even though it is not officially supported by
MNE-Python due to the lack of standardisation of the file format (the
naming and ordering of channels, the type and scaling of data, and
specification of sensor positions varies between each vendor). You will likely
have to adapt this depending on the system from which your CSV originated.
End of explanation
"""
pd.DataFrame(np.random.normal(size=(16, 100))).to_csv("fnirs.csv")
"""
Explanation: First, we generate an example CSV file which will then be loaded in to
MNE-Python. This step would be skipped if you have actual data you wish to
load. We simulate 16 channels with 100 samples of data and save this to a
file called fnirs.csv.
End of explanation
"""
data = pd.read_csv('fnirs.csv')
"""
Explanation: <div class="alert alert-danger"><h4>Warning</h4><p>The channels must be ordered in haemoglobin pairs, such that for
a single channel all the types are in subsequent indices. The
type order must be 'hbo' then 'hbr'.
The data below is already in the correct order and may be
used as a template for how data must be stored.
If the order that your data is stored is different to the
mandatory formatting, then you must first read the data with
channel naming according to the data structure, then reorder
the channels to match the required format.</p></div>
Next, we will load the example CSV file.
End of explanation
"""
# Channel names follow the mandatory 'S#_D# type' convention, ordered in
# hbo/hbr pairs: sources 1-4 pair with detector 1, sources 5-8 with detector 2.
ch_names = ['S{}_D{} {}'.format(src, (src - 1) // 4 + 1, kind)
            for src in range(1, 9) for kind in ('hbo', 'hbr')]
ch_types = ['hbo', 'hbr'] * 8
sfreq = 10.  # in Hz
"""
Explanation: Then, the metadata must be specified manually as the CSV file does not
contain information about channel names, types, sample rate etc.
<div class="alert alert-danger"><h4>Warning</h4><p>In MNE-Python the naming of channels MUST follow the structure
``S#_D# type`` where # is replaced by the appropriate source and
detector numbers and type is either ``hbo``, ``hbr`` or the
wavelength.</p></div>
End of explanation
"""
# Bundle the channel metadata into an Info object, then combine it with the
# data to build a Raw object (data row order must match ch_names order).
info = mne.create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq)
raw = mne.io.RawArray(data, info, verbose=True)
"""
Explanation: Finally, the data can be converted in to an MNE-Python data structure.
The metadata above is used to create an :class:mne.Info data structure,
and this is combined with the data to create an MNE-Python
:class:~mne.io.Raw object. For more details on the info structure
see tut-info-class, and for additional details on how continuous data
is stored in MNE-Python see tut-raw-class.
For a more extensive description of how to create MNE-Python data structures
from raw array data see tut-creating-data-structures.
End of explanation
"""
# Attach the standard optode layout for an Artinis OctaMon device.
montage = mne.channels.make_standard_montage('artinis-octamon')
raw.set_montage(montage)
# View the position of optodes in 2D to confirm the positions are correct.
raw.plot_sensors()
"""
Explanation: Applying standard sensor locations to imported data
Having information about optode locations may assist in your analysis.
Beyond the general benefits this provides (e.g. creating regions of interest,
etc), this may be particularly important for fNIRS as information about
the optode locations is required to convert the optical density data in to an
estimate of the haemoglobin concentrations.
MNE-Python provides methods to load standard sensor configurations
(montages) from some vendors, and this is demonstrated below.
Some handy tutorials for understanding sensor locations, coordinate systems,
and how to store and view this information in MNE-Python are:
tut-sensor-locations, tut-source-alignment, and
ex-eeg-on-scalp.
Below is an example of how to load the optode positions for an Artinis
OctaMon device.
<div class="alert alert-info"><h4>Note</h4><p>It is also possible to create a custom montage from a file for
fNIRS with :func:`mne.channels.read_custom_montage` by setting
``coord_frame`` to ``'mri'``.</p></div>
End of explanation
"""
# Fetch the fsaverage template subject (downloads on first use), then render
# the head and sensors in 3D on the template brain for a visual sanity check.
subjects_dir = op.join(mne.datasets.sample.data_path(), 'subjects')
mne.datasets.fetch_fsaverage(subjects_dir=subjects_dir)
brain = mne.viz.Brain('fsaverage', subjects_dir=subjects_dir,
                      alpha=0.5, cortex='low_contrast')
brain.add_head()
brain.add_sensors(raw.info, trans='fsaverage')
brain.show_view(azimuth=90, elevation=90, distance=500)
"""
Explanation: To validate the positions were loaded correctly it is also possible to view
the location of the sources (red), detectors (black), and channels (white
lines and orange dots) in a 3D representation.
The fiducials are marked in blue, green and red.
See tut-source-alignment for more details.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.19/_downloads/bb8e52a46ac1372ec146fb9c9983f326/plot_15_handling_bad_channels.ipynb | bsd-3-clause | import os
from copy import deepcopy
import numpy as np
import mne
# Locate the MNE sample dataset (downloaded on first use) and open the raw
# recording without loading it fully into memory.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
"""
Explanation: Interpolating bad channels
This tutorial covers manual marking of bad channels and reconstructing bad
channels based on good signals at other sensors.
:depth: 2
As usual we'll start by importing the modules we need, and loading some example
data:
End of explanation
"""
print(raw.info['bads'])
"""
Explanation: Marking bad channels
^^^^^^^^^^^^^^^^^^^^
Sometimes individual channels malfunction and provide data that is too noisy
to be usable. MNE-Python makes it easy to ignore those channels in the
analysis stream without actually deleting the data in those channels. It does
this by
keeping track of the bad channel indices in a list and looking at that list
when doing analysis or plotting tasks. The list of bad channels is stored in
the 'bads' field of the :class:~mne.Info object that is attached to
:class:~mne.io.Raw, :class:~mne.Epochs, and :class:~mne.Evoked objects.
End of explanation
"""
# Select EEG 050-059 with a regular expression ('.' matches any single
# character) and plot them to compare bad EEG 053 against its neighbours.
picks = mne.pick_channels_regexp(raw.ch_names, regexp='EEG 05.')
raw.plot(order=picks, n_channels=len(picks))
"""
Explanation: Here you can see that the :file:.fif file we loaded from disk must have
been keeping track of channels marked as "bad" — which is good news, because
it means any changes we make to the list of bad channels will be preserved if
we save our data at intermediate stages and re-load it later. Since we saw
above that EEG 053 is one of the bad channels, let's look at it alongside
some other EEG channels to see what's bad about it. We can do this using the
standard :meth:~mne.io.Raw.plot method, and instead of listing the channel
names one by one (['EEG 050', 'EEG 051', ...]) we'll use a regular
expression_ to pick all the EEG channels between 050 and 059 with the
:func:~mne.pick_channels_regexp function (the . is a wildcard
character):
End of explanation
"""
# Pick all MEG channels starting with '2' and ending with '3' (the trailing
# digit encodes the sensor type on Neuromag systems) to show noisy MEG 2443.
picks = mne.pick_channels_regexp(raw.ch_names, regexp='MEG 2..3')
raw.plot(order=picks, n_channels=len(picks))
"""
Explanation: We can do the same thing for the bad MEG channel (MEG 2443). Since we
know that Neuromag systems (like the one used to record the example data) use
the last digit of the MEG channel number to indicate sensor type, here our
regular expression_ will pick all the channels that start with 2 and end
with 3:
End of explanation
"""
# raw.info['bads'] is an ordinary Python list, so the usual list methods
# apply; take a deep copy first so the original state can be restored.
original_bads = deepcopy(raw.info['bads'])
raw.info['bads'].append('EEG 050') # add a single channel
raw.info['bads'].extend(['EEG 051', 'EEG 052']) # add a list of channels
bad_chan = raw.info['bads'].pop(-1) # remove the last entry in the list
raw.info['bads'] = original_bads # change the whole list at once
"""
Explanation: Notice first of all that the channels marked as "bad" are plotted in a light
gray color in a layer behind the other channels, to make it easy to
distinguish them from "good" channels. The plots make it clear that EEG
053 is not picking up scalp potentials at all, and MEG 2443 looks like
it's got a lot more internal noise than its neighbors — its signal is a few
orders of magnitude greater than the other MEG channels, making it a clear
candidate for exclusion.
If you want to change which channels are marked as bad, you can edit
raw.info['bads'] directly; it's an ordinary Python :class:list so the
usual list methods will work:
End of explanation
"""
# pick_types defaults to exclude='bads'; passing exclude=[] keeps the bad
# channels, so the set difference is exactly the bad EEG picks.
good_eeg = mne.pick_types(raw.info, meg=False, eeg=True)
all_eeg = mne.pick_types(raw.info, meg=False, eeg=True, exclude=[])
bad_eeg_idx = np.setdiff1d(all_eeg, good_eeg)
print(bad_eeg_idx)
print(np.array(raw.ch_names)[bad_eeg_idx])
"""
Explanation: .. sidebar:: Blocking execution
If you want to build an interactive bad-channel-marking step into an
analysis script, be sure to include the parameter ``block=True`` in your
call to ``raw.plot()`` or ``epochs.plot()``. This will pause the script
while the plot is open, giving you time to mark bad channels before
subsequent analysis or plotting steps are executed. This can be
especially helpful if your script loops over multiple subjects.
You can also interactively toggle whether a channel is marked "bad" in the
plot windows of raw.plot() or epochs.plot() by clicking on the
channel name along the vertical axis (in raw.plot() windows you can also
do this by clicking the channel's trace in the plot area). The bads field
gets updated immediately each time you toggle a channel, and will retain its
modified state after the plot window is closed.
The list of bad channels in the :class:mne.Info object's bads field is
automatically taken into account in dozens of functions and methods across
the MNE-Python codebase. This is done consistently with a parameter
exclude='bads' in the function or method signature. Typically this
exclude parameter also accepts a list of channel names or indices, so if
you want to include the bad channels you can do so by passing
exclude=[] (or some other list of channels to exclude). For example:
End of explanation
"""
# Deliberately clear the bads list on a copy to show how an unmarked bad
# channel corrupts the evoked average.
raw2 = raw.copy()
raw2.info['bads'] = []
events = mne.find_events(raw2, stim_channel='STI 014')
# NOTE(review): the name 'epochs' is misleading -- this appears to bind the
# figure returned by plot(), not an Epochs object; confirm before reusing it.
epochs = mne.Epochs(raw2, events=events)['2'].average().plot()
"""
Explanation: When to look for bad channels
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can start looking for bad channels during the experiment session when the
data is being acquired. If you notice any flat or excessively noisy channels,
you can note them in your experiment log or protocol sheet. If your system
computes online averages, these can be a good way to spot bad channels as
well. After the data has been collected, you can do a more thorough check for
bad channels by browsing the raw data using :meth:mne.io.Raw.plot, with any
projectors or ICA applied. Finally, you can compute offline averages (again
with projectors, ICA, and EEG referencing disabled) to look for channels with
unusual properties. Here's an example of ERP/F plots where the bad channels
were not properly marked:
End of explanation
"""
raw.crop(tmin=0, tmax=3).load_data()
"""
Explanation: The bad EEG channel is not so obvious, but the bad gradiometer is easy to
see.
Remember, marking bad channels should be done as early as possible in the
analysis pipeline. When bad channels are marked in a :class:~mne.io.Raw
object, the markings will be automatically transferred through the chain of
derived object types: including :class:~mne.Epochs and :class:~mne.Evoked
objects, but also :class:noise covariance <mne.Covariance> objects,
:class:forward solution computations <mne.Forward>, :class:inverse
operators <mne.minimum_norm.InverseOperator>, etc. If you don't notice the
badness until later stages of your analysis pipeline, you'll probably need to
go back and re-run the pipeline, so it's a good investment of time to
carefully explore the data for bad channels early on.
Why mark bad channels at all?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Many analysis computations can be strongly affected by the presence of bad
channels. For example, a malfunctioning channel with completely flat signal
will have zero channel variance, which will cause noise estimates to be
unrealistically low. This low noise estimate will lead to a strong channel
weight in the estimate of cortical current, and because the channel is flat,
the magnitude of cortical current estimates will shrink dramatically.
Conversely, very noisy channels can also cause problems. For example, they
can lead to too many epochs being discarded based on signal amplitude
rejection thresholds, which in turn can lead to less robust estimation of the
noise covariance across sensors. Noisy channels can also interfere with
:term:SSP <projector> computations, because the projectors will be
spatially biased in the direction of the noisy channel, which can cause
adjacent good channels to be suppressed. ICA is corrupted by noisy channels
for similar reasons. On the other hand, when performing machine learning
analyses, bad channels may have limited, if any impact (i.e., bad channels
will be uninformative and therefore ignored / deweighted by the algorithm).
Interpolating bad channels
^^^^^^^^^^^^^^^^^^^^^^^^^^
In some cases simply excluding bad channels is sufficient (for example, if
you plan only to analyze a specific sensor ROI, and the bad channel is
outside that ROI). However, in cross-subject analyses it is often helpful to
maintain the same data dimensionality for all subjects, and there is no
guarantee that the same channels will be bad for all subjects. It is possible
in such cases to remove each channel that is bad for even a single subject,
but that can lead to a dramatic drop in data rank (and ends up discarding a
fair amount of clean data in the process). In such cases it is desirable to
reconstruct bad channels by interpolating its signal based on the signals of
the good sensors around them.
How interpolation works
~~~~~~~~~~~~~~~~~~~~~~~
Interpolation of EEG channels in MNE-Python is done using the spherical
spline method [1]_, which projects the sensor locations onto a unit sphere
and interpolates the signal at the bad sensor locations based on the signals
at the good locations. Mathematical details are presented in
channel-interpolation. Interpolation of MEG channels uses the field
mapping algorithms used in computing the forward solution
<tut-forward>.
Interpolation in MNE-Python
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Interpolating bad channels in :class:~mne.io.Raw objects is done with the
:meth:~mne.io.Raw.interpolate_bads method, which automatically applies the
correct method (spherical splines or field interpolation) to EEG and MEG
channels, respectively (there is a corresponding method
:meth:mne.Epochs.interpolate_bads that works for :class:~mne.Epochs
objects). To illustrate how it works, we'll start by cropping the raw object
to just three seconds for easier plotting:
End of explanation
"""
# Keep bad channels in the selection (exclude=[]), interpolate on a copy, and
# plot both versions; reset_bads=False leaves the repaired channels marked so
# they still draw in red.
eeg_data = raw.copy().pick_types(meg=False, eeg=True, exclude=[])
eeg_data_interp = eeg_data.copy().interpolate_bads(reset_bads=False)

def _show(data, title):
    """Butterfly-plot *data* with bad channels in red and a bold title."""
    fig = data.plot(butterfly=True, color='#00000022', bad_color='r')
    fig.subplots_adjust(top=0.9)
    fig.suptitle(title, size='xx-large', weight='bold')

_show(eeg_data, 'orig.')
_show(eeg_data_interp, 'interp.')
"""
Explanation: By default, :meth:~mne.io.Raw.interpolate_bads will clear out
raw.info['bads'] after interpolation, so that the interpolated channels
are no longer excluded from subsequent computations. Here, for illustration
purposes, we'll prevent that by specifying reset_bads=False so that when
we plot the data before and after interpolation, the affected channels will
still plot in red:
End of explanation
"""
# Same comparison for the gradiometers; a more transparent gray is used since
# there are more channels, and reset_bads=False keeps the repaired channel red.
grad_data = raw.copy().pick_types(meg='grad', exclude=[])
grad_data_interp = grad_data.copy().interpolate_bads(reset_bads=False)
for data in (grad_data, grad_data_interp):
    data.plot(butterfly=True, color='#00000009', bad_color='r')
"""
Explanation: Note that we used the exclude=[] trick in the call to
:meth:~mne.io.Raw.pick_types to make sure the bad channels were not
automatically dropped from the selection. Here is the corresponding example
with the interpolated gradiometer channel; since there are more channels
we'll use a more transparent gray color this time:
End of explanation
"""
|
JAmarel/Phys202 | Interact/.ipynb_checkpoints/InteractEx04-checkpoint.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
"""
Explanation: Interact Exercise 4
Imports
End of explanation
"""
def random_line(m, b, sigma, size=10):
    """Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]

    Parameters
    ----------
    m : float
        The slope of the line.
    b : float
        The y-intercept of the line.
    sigma : float
        The standard deviation of the y direction normal distribution noise.
    size : int
        The number of points to create for the line.

    Returns
    -------
    x : array of floats
        The array of x values for the line with `size` points.
    y : array of floats
        The array of y values for the lines with `size` points.
    """
    x = np.linspace(-1.0, 1.0, size)
    y = m * x + b
    if sigma == 0:
        # No noise requested; return the exact line.
        return x, y
    # BUGFIX: np.random.normal's second argument is the *standard deviation*
    # (scale), so pass sigma directly -- passing sigma**2 produced noise with
    # a std of sigma**2, contradicting the docstring.
    return x, y + np.random.normal(0.0, sigma, size)
# Noise-free case: three points on the horizontal line y = 1.
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
# Noisy case: the residuals y - (m*x + b) should have mean ~0 and std ~sigma.
# NOTE(review): sigma=1.0 cannot distinguish a noise std of sigma from
# sigma**2; a value like sigma=2.0 would make this check stricter.
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
"""
Explanation: Line with Gaussian noise
Write a function named random_line that creates x and y data for a line with y direction random noise that has a normal distribution $N(0,\sigma^2)$:
$$
y = m x + b + N(0,\sigma^2)
$$
Be careful about the sigma=0.0 case.
End of explanation
"""
def ticks_out(ax):
    """Move the ticks to the outside of the box."""
    for axis in (ax.get_xaxis(), ax.get_yaxis()):
        axis.set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
    """Plot a random line with slope m, intercept b and size points."""
    xs, ys = random_line(m, b, sigma, size)
    plt.figure(figsize=(9, 6))
    plt.scatter(xs, ys, color=color)
    plt.title('Plot of a line with Gaussian noise')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.xlim(-1.1, 1.1)
    plt.ylim(-10.0, 10.0)
    # Ticks point outward; no ticks on the top or right spines.
    plt.tick_params(axis='x', top='off', direction='out')
    plt.tick_params(axis='y', right='off', direction='out')

plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
"""
Explanation: Write a function named plot_random_line that takes the same arguments as random_line and creates a random line using random_line and then plots the x and y points using Matplotlib's scatter function:
Make the marker color settable through a color keyword argument with a default of red.
Display the range $x=[-1.1,1.1]$ and $y=[-10.0,10.0]$.
Customize your plot to make it effective and beautiful.
End of explanation
"""
# Explore plot_random_line interactively: sliders for m, b, sigma and size,
# plus a dropdown for the marker colour.
interact(plot_random_line,m=(-10.0,10.0,0.1),b=(-5.0,5.0,0.1),sigma=(0.0,5.0,0.01),size=(10,100,10),color=('red','green','blue'));
# assert True  # use this cell to grade the plot_random_line interact
"""
Explanation: Use interact to explore the plot_random_line function using:
m: a float valued slider from -10.0 to 10.0 with steps of 0.1.
b: a float valued slider from -5.0 to 5.0 with steps of 0.1.
sigma: a float valued slider from 0.0 to 5.0 with steps of 0.01.
size: an int valued slider from 10 to 100 with steps of 10.
color: a dropdown with options for red, green and blue.
End of explanation
"""
|
pmgbergen/porepy | tutorials/ad_framework.ipynb | gpl-3.0 | import numpy as np
import porepy as pp
import scipy.sparse.linalg as spla
# Fracture geometry: each 2x2 array holds the two endpoints of a fracture,
# first row x-coordinates, second row y-coordinates (porepy convention -
# confirm against pp.meshing.cart_grid docs).
# fractures 1 and 2 cross each other in (3, 3)
frac_1 = np.array([[2, 2], [2, 4]])
frac_2 = np.array([[2, 5], [3, 3]])
# fracture 3 is isolated
frac_3 = np.array([[6, 6], [1, 5]])
# Build the mixed-dimensional GridBucket on a 7x7 Cartesian grid.
gb = pp.meshing.cart_grid([frac_1, frac_2, frac_3], nx=np.array([7, 7]))
"""
Explanation: Setting up equations using Automatic Differentiation and abstract equations
This tutorial is meant as an introduction to a new framework for defining and working with (non-linear) equations in PorePy. Specifically, the aim is to develop an approach which:
1. Gives a transparent way of specifying non-linear, multiphysics and multi-dimensional equations.
2. Speeds up assembly of Jacobian matrices, in particular for geometries with many subdomains.
3. Is better suited to combine with general linear and non-linear solvers etc.
Disclaimer
The framework, referred to as the "ad framework' (ad = automatic differentiation) is currently (Spring 2021) under more or less active development. The below tutorial is intended to give an overview of the design and use of the framework, to ease adaptation in new projects. Since the code is under active development, the code will change, hopefully, this tutorial will keep track. In the same spirit, the tutorial will strive to point to uncertainties on how the code will actually function, indicate code that is likely to change, document best practice when opinions on this exist etc.
Background
Over its first few years of existence, PorePy was mainly applied to linear problems; the development was focused more on mixed-dimensionality than on non-linearities. There were two notable exceptions:
1. Simulations of viscous fingering in fractured domains, paper here
2. Simulations of thermo-poromechanics coupled with deformation of fractures, where the latter was modeled as a contact mechanics problem, see for instance this paper
The two projects both used a Newton-type approach to solving the resulting linear system, but took fundamentally different approaches in the linearization: In the contact-mechanics problem, the Jacobian matrix was formed 'by hand' on a block-matrix level, so, to correctly linearize complex expressions, the user was responsible for applying the chain rule correctly on all terms, on all subdomains. In addition to requiring precision of the user, this approach became somewhat cumbersome on the interface between subdomains, where extra classes had to be implemented to couple different terms (technically, this has to do with the design of the Assembler object; however, for the purpose of this tutorial there is no need to understand this fully).
The non-linear transport problem took a different approach: The project implemented Ad, and thereby removed the painstaking implementation of the Jacobian matrix. To see this works, look first at the tutorial on Ad in general, and next on the tutorial on how to combine Ad with discretization operations in general.
Scope of the new Ad framework
The new approach to Ad can be seen as an extension of the existing functionality, with the following ambitions:
1. For the purpose of writing equations, it should be possible to consider multiple grids simultaneously, with no need for for-loops or similar.
2. Instead of the immediate evaluation of residuals and derivatives applied in the existing framework, the new approach should apply delayed evaluation.
The first point will both improve code redability, and substantially improve runtimes in cases with many subdomains. The latter allows for advanced linear and non-linear solvers, and possibly for automatic updates of discretizations of non-linear terms, both of which will be explored in the future.
Framework components
So far, the framework consists of four types of classes:
1. Grid-dependent operators, defined on one or multiple subdomain grids. Examples are:
* divergence and trace operators
* boundary conditions,
* projections between mortar and subdomain grids
* projections between sets of subdomains and subsets.
2. Variables. These carry the numerical state of the primary variables, and also values at previous time steps and iteration states.
3. Discretization objects. These are mainly shells around standard PorePy discretiation methods.
4. Classes needed to turn variables and discretizations into equations, linearize them etc.
Test case: A mixed-dimensional grid.
As a test case, we define a mixed-dimensional grid, which we for simplicity let be Cartesian
End of explanation
"""
# String representations of the variables.
pressure_var = 'pressure'
mortar_var = 'mortar_flux'
# Loop over all subdomains, define a cell centered variable
# (one pressure degree of freedom per cell).
for _, d in gb:
    d[pp.PRIMARY_VARIABLES] = {pressure_var: {'cells': 1}}
# Also loop over interfaces: one mortar-flux dof per mortar cell.
for _, d in gb.edges():
    d[pp.PRIMARY_VARIABLES] = {mortar_var: {'cells': 1}}
"""
Explanation: Next, we define variables on the subdomains and interfaces. This is done as before:
End of explanation
"""
# Problem parameters for the mixed-dimensional flow problem.
param_key = 'flow'
matrix_perm = 1
fracture_perm = 1e2
interface_diffusivity = 1e2
for g, d in gb:
    if g.dim == 2:
        # Homogeneous isotropic permeability in the 2d matrix.
        perm = pp.SecondOrderTensor(matrix_perm * np.ones(g.num_cells))
        # Dirichlet conditions on right and left
        # (faces whose x-coordinate matches the domain bounding box).
        left = np.where(np.abs(g.face_centers[0] - gb.bounding_box()[0][0]) < 1e-6)[0]
        right = np.where(np.abs(g.face_centers[0] - gb.bounding_box()[1][0]) < 1e-6)[0]
        bc_cond = ['dir'] * (left.size + right.size)
        bc = pp.BoundaryCondition(g, np.hstack((left, right)), bc_cond)
        # Unit pressure on the left boundary, zero elsewhere.
        bc_val = np.zeros(g.num_faces)
        bc_val[left] = 1
        specified_data = {'second_order_tensor': perm,
                          'bc': bc,
                          'bc_values': bc_val}
        d = pp.initialize_data(g, d, param_key, specified_data)
    else:
        # Fractures (and intersections): higher permeability.
        perm = pp.SecondOrderTensor(fracture_perm * np.ones(g.num_cells))
        # No-flow Neumann conditions (porepy default BoundaryCondition).
        bc = pp.BoundaryCondition(g)
        bc_val = np.zeros(g.num_faces)
        specified_data = {'second_order_tensor': perm,
                          'bc': bc,
                          'bc_values': bc_val}
        d = pp.initialize_data(g, d, param_key, specified_data)
# Initialize data for interfaces as well
for e, d in gb.edges():
    mg = d['mortar_grid']
    kn = interface_diffusivity * np.ones(mg.num_cells)
    pp.initialize_data(mg, d, param_key, {'normal_diffusivity': kn})
"""
Explanation: Parameter assignmnet is also done as before, see this tutorial for details. Specifically, we will consider a mixed-dimensional flow problem.
End of explanation
"""
# Seed the STATE dictionaries with random values so later evaluations of the
# Ad expressions have concrete (if arbitrary) numbers to work with.
for g, d in gb:
    pp.set_state(d)
    d[pp.STATE][pressure_var] = np.random.rand(g.num_cells)
for e, d in gb.edges():
    pp.set_state(d)
    d[pp.STATE][mortar_var] = np.random.rand(d['mortar_grid'].num_cells)
"""
Explanation: We also give numerical values to the pressure and flux variables, just so that we get more interesting numbers below
End of explanation
"""
# Fixed, ordered lists of subdomain grids and interface edges. The ordering
# defines the dof ordering, so these same lists must be reused everywhere.
grid_list = [g for g, _ in gb]
edge_list = [e for e, _ in gb.edges()]
"""
Explanation: Definition of grid-related operators
Now, we are ready to apply the new Ad framework to this mixed-dimensional problem. The key to exploit this efficiently (in terms of both user friendliness and computational speed) is to operate on several grids simultaneously. To that end, we make a list of all subdomain grids, and similarly of all the edges (not mortar grids - we need to keep the link to the adjacent subdomains).
NOTE: The order of the grid in the list is important, as it sets the ordering of variables, discretization object etc. It is recommended to define a list of grids and use this throughout to define variables etc. A list of mortar grids should be made similarly.
End of explanation
"""
div = pp.ad.Divergence(grids=grid_list)
"""
Explanation: Now, we can for instance define a joint divergence operator for all subdomains:
End of explanation
"""
type(div)
"""
Explanation: Note that this is not a matrix, but a special object:
End of explanation
"""
mortar_proj = pp.ad.MortarProjections(gb=gb, grids=grid_list, edges=edge_list)
"""
Explanation: We will come back to how to translate div into a numerical expression.
We can also define merged projection operators between the subdomain and mortar grids. This can be done either on the whole gb, or on parts of it. The ordering of the grids is important, and frankly not completely clear, but the following seems to work (if you get a warning, disregard it; this will be handled at a later point):
End of explanation
"""
bound_ad = pp.ad.BoundaryCondition(param_key, grids=grid_list)
"""
Explanation: Critically, the initialization defines a list of grids (and edges), just the same way as we did in the grid list, and, since iterations over the grid bucket items uses a fixed order, we're good.
Finally, we will need a representation of boundary conditions:
End of explanation
"""
dof_manager = pp.DofManager(gb) # note: no pp.ad here
"""
Explanation: Again, this is not a numerical boundary condition, but rather a way to access given boundary data.
Mixed-dimensional Ad variables
The next step is to define Ad representations of the (mixed-dimensional) variables. For this, we need no less than three different steps (fortunately, we can use these objects for other parts below as well).
First, define a degree of freedom manager. For users who have been exposed to the Assembler, this is actually part of that class which has been moved to a separate object, which is responsible for keeping track of local which indices belong to which degrees of freedom:
End of explanation
"""
equation_manager = pp.ad.EquationManager(gb, dof_manager)
"""
Explanation: Next, define an EquationManager. This is a class which may be significantly changed in the months to come, but for the moment, it is responsible for providing Ad representations of the variables.
End of explanation
"""
# Merged Ad variables: p spans the pressure on every subdomain, lmbda the
# mortar flux on every interface. These carry no numerical values yet.
p = equation_manager.merge_variables([(g, pressure_var) for g in grid_list])
lmbda = equation_manager.merge_variables([(e, mortar_var) for e in edge_list])
"""
Explanation: Finally, we can define Ad variables
End of explanation
"""
# Inspection only: print string summaries of the merged variables.
print(p)
print(lmbda)
"""
Explanation: Note that p and lmbda do not have numerical values. What we have done is instead to prepare to:
1. Prepare the ground to write equations with the equations
2. Prepare for the later translation of the equations to numerical values (values and derivatives)
To get some information about the variables, we can type
End of explanation
"""
mpfa = pp.ad.MpfaAd(param_key, grid_list)
"""
Explanation: Mixed-dimensional ad equations
Next, we turn to discretization. To be compatible with the Ad framework, PorePy discretizations need a wrapper which mainly allows for the delayed evaluation of the expressions. For instance, the Ad version of Mpfa is defined by writing
End of explanation
"""
interior_flux = mpfa.flux * p
"""
Explanation: This object, once again, has no numerical values, but is rather an abstract representation of a standard Mpfa discretization. The two versions of Mpfa refer to the discretization matrices resulting from the discretization in similar ways: Mpfa has attributes like 'flux_matrix_key', which specifies where the flux discretization matrix is stored. Similarly, MpfaAd has an attribute 'flux', which, upon parsing of an Ad experession (below), will access the same discretization matrix.
To show how this works in action, we can define the flux discretization on subdomain as
End of explanation
"""
# Compute discretization matrices, then parse the recipe into an Ad array
# (value + Jacobian) using the states stored in the GridBucket.
interior_flux.discretize(gb)
num_flux = interior_flux.evaluate(dof_manager)
print(num_flux)
"""
Explanation: In essence, there are two types of Ad objects:
1. Atomic objects, like mpfa.flux and p. These can be considered pointers to places in the data dictionary where the numerical values associated with the objects are stored. For instance, p in our example points to a collection of d[pp.STATE][pressure_var], where d is the data dictionary for each of the grids on which p was defined.
2. Composite objects, like interior_flux, formed by combining Ad objects (which themselves can be atomic or composites) using basic mathematical operations.
These Ad objects are not designed for numerical evaluation by themselves; they can be thought of as recipes for combining discretizations, variables etc. To parse a recipe, we provide it with a GridBucket, from where it can pull numerical values for variables, discretization matrices and so on.
End of explanation
"""
full_flux = interior_flux + mpfa.bound_flux * bound_ad + mpfa.bound_flux*mortar_proj.mortar_to_primary_int * lmbda
"""
Explanation: We note that num_flux has the size of the total number of faces in the grids, and that its Jacobian matrix is a mapping from cells to faces.
On a technical level (no need to understand this), composite Ad objcets are implemented as a tree structure, where the leaves are atomic Ad objects. Parsing of the expression is done by identification of these leves, and then use standard forward Ad to evaluate the composites.
We can define more elaborate combinations of variables. The interior_flux object (side note: Even though we just wrapped it into an Expression, the original composite Ad object is still alive) represents only the part of the flux caused by pressure variations internal to subdomains. To get the full flux, we need to account for boundary conditions from external boundaries, as well as from internal boundaries to domains of lower dimensions.
Note that for the time being, we cannot write 'mpfa.bound_flux * (bound_ad + mortar_proj... * lmbda)'; the parsing of the expressions does not respect parentheses the way it should. To be improved, hopefully.
End of explanation
"""
# Inspection only: the Jacobian now has columns for mortar dofs as well.
print(f'Size of value array: {num_flux.val.shape}')
print(f'Size of Jacobian matrix: {num_flux.jac.shape}')
"""
Explanation: Now, it is interesting to see what happens when the numerical value of full_flux is computed:
End of explanation
"""
sources_from_mortar = mortar_proj.mortar_to_secondary_int * lmbda
"""
Explanation: Compare the size of the Jacobian matrix with the size of the matrix for int_flux: The number of rows is still equal to the total number of faces in the grid, but the number of columns has increased to also include derivatives with respect to the mortar variables.
We can also compute the projection of the mortar fluxes onto the lower-dimensional subdomains, where they are manifested as sources:
End of explanation
"""
conservation = div * full_flux + sources_from_mortar
"""
Explanation: Put together, we now have the full mass conservation equation on all subdomains:
End of explanation
"""
# Reconstructed pressure trace on internal boundaries, projected to the
# mortar grids: cell-pressure contribution plus the mortar-flux correction
# (the Mpfa/Tpfa way of recovering face pressures).
pressure_trace_from_high = (mortar_proj.primary_to_mortar_avg * mpfa.bound_pressure_cell * p
                            + mortar_proj.primary_to_mortar_avg * mpfa.bound_pressure_face * mortar_proj.mortar_to_primary_int * lmbda
                            )
"""
Explanation: We can also define equations for the interface mortars. To that end, we first define the pressure trace on internal boundaries - the most accurate representation of this trace is a bit complex within Mpfa (and Tpfa) methods
End of explanation
"""
robin = pp.ad.RobinCouplingAd(param_key, edge_list)
"""
Explanation: Next, we define a discretization object for the mortar equation
End of explanation
"""
# Darcy-type interface law: flux proportional to the pressure jump between
# the higher-dimensional trace and the lower-dimensional pressure.
interface_flux_eq = (robin.mortar_discr * (pressure_trace_from_high -
                                           mortar_proj.secondary_to_mortar_avg * p)
                     + lmbda)
"""
Explanation: Now, we can write the Darcy-type equation for the interface flux
End of explanation
"""
# Register the two named equations with the manager for joint assembly.
eqs = {'subdomain_conservation': conservation, 'interface_fluxes': interface_flux_eq}
equation_manager.equations.update(eqs)
"""
Explanation: Assemble the system of equations
Now, we only have to feed the equations to the equation manager to be ready to assemble the full system, formed by the conservation statement and the interface flux equations:
End of explanation
"""
# first discretize
equation_manager.discretize(gb)
# next assemble the equations
A, b = equation_manager.assemble()
# Solve, system, note the minus sign on the right hand side
# NOTE(review): no explicit minus sign appears here; presumably assemble()
# returns the residual already negated -- confirm against the porepy API.
solution = spla.spsolve(A, b)
# Distribute variable to local data dictionaries.
# The solution is an *update* to the existing state, hence additive=True.
dof_manager.distribute_variable(solution, additive=True)
# Export the resulting pressure field for visualization (vtu files).
exporter = pp.Exporter(gb, 'ad_test')
exporter.write_vtu([pressure_var])
"""
Explanation: The equation_manager can be used to assemble the coupled linear system, much in the same way as a recipe is evaluated. Before that, the discretization matrices must be constructed.
NOTE: The computed solution has the interpretation of the update to the existing state, that is, the random values we assigned above. The solution must be distributed in an additive manner.
End of explanation
"""
p_prev = p.previous_timestep()
"""
Explanation: What have we done
We summarize the steps needed to define an equation:
1. Define variables
2. Define grid-related operators (not strictly necessary, but most often)
3. Define discretizations
4. Combine into equations, and evaluate.
More advanced usage
Below are a few additional techniques which are needed to define other types of equations (to be covered in a more elaborate set of tutorials in the future?):
To access the state of a variable on the previous time step, do
End of explanation
"""
# Projection operators restricted to the 2d subdomains only.
g_2d = gb.grids_of_dimension(2)
subdomain_proj = pp.ad.SubdomainProjections(grids=g_2d)
"""
Explanation: To use a variable on only a few subdomains, use subdomain projections:
End of explanation
"""
|
dynaryu/rmtk | rmtk/vulnerability/derivation_fragility/R_mu_T_dispersion/SPO2IDA/spo2ida.ipynb | agpl-3.0 | from rmtk.vulnerability.derivation_fragility.R_mu_T_dispersion.SPO2IDA import SPO2IDA_procedure
from rmtk.vulnerability.common import utils
%matplotlib inline
"""
Explanation: SPO2IDA
This methodology uses the SPO2IDA tool described in Vamvatsikos and Cornell (2006) to convert static pushover curves into $16\%$, $50\%$, and $84\%$ IDA curves. The SPO2IDA tool is based on empirical relationships obtained from a large database of incremental dynamic analysis results. This procedure is applicable to any kind of multi-linear capacity curve and it is suitable for single-building fragility curve estimation. Individual fragility curves can later be combined into a single fragility curve that considers the inter-building uncertainty. The figure below illustrates the IDA curves estimated using this methodology for a given capacity curve.
<img src="../../../../../figures/spo2ida.jpg" width="500" align="middle">
Note: To run the code in a cell:
Click on the cell to select it.
Press SHIFT+ENTER on your keyboard or press the play button (<button class='fa fa-play icon-play btn btn-xs btn-default'></button>) in the toolbar above.
End of explanation
"""
# Input files: capacity curves (base shear vs. floor displacement) and a
# target spectral shape used when multiple curves are supplied.
capacity_curves_file = "../../../../../../rmtk_data/capacity_curves_Vb-dfloor.csv"
input_spectrum = "../../../../../../rmtk_data/FEMAP965spectrum.txt"
capacity_curves = utils.read_capacity_curves(capacity_curves_file)
Sa_ratios = utils.get_spectral_ratios(capacity_curves, input_spectrum)
utils.plot_capacity_curves(capacity_curves)
"""
Explanation: Load capacity curves
In order to use this methodology, it is necessary to provide one (or a group) of capacity curves, defined according to the format described in the RMTK manual. In case multiple capacity curves are input, a spectral shape also needs to be defined.
Please provide the location of the file containing the capacity curves using the parameter capacity_curves_file.
Please also provide a spectral shape using the parameter input_spectrum if multiple capacity curves are used.
End of explanation
"""
# Idealise the pushover curves; valid options here are "bilinear" and
# "quadrilinear".
idealised_type = "quadrilinear"
idealised_capacity = utils.idealisation(idealised_type, capacity_curves)
utils.plot_idealised_capacity(idealised_capacity, capacity_curves, idealised_type)
"""
Explanation: Idealise pushover curves
In order to use this methodology the pushover curves need to be idealised. Please choose an idealised shape using the parameter idealised_type. The valid options for this methodology are "bilinear" and "quadrilinear". Idealised curves can also be directly provided as input by setting the field Idealised to TRUE in the input file defining the capacity curves.
End of explanation
"""
# Damage-state thresholds (interstorey drift model only, per the tutorial).
damage_model_file = "../../../../../../rmtk_data/damage_model_ISD.csv"
damage_model = utils.read_damage_model(damage_model_file)
"""
Explanation: Load damage state thresholds
Please provide the path to your damage model file using the parameter damage_model_file in the cell below. Currently only interstorey drift damage model type is supported.
End of explanation
"""
# Monte Carlo samples combine threshold dispersion with record-to-record
# dispersion. NOTE(review): the trailing positional argument `1` is
# undocumented here -- check SPO2IDA_procedure.calculate_fragility's signature.
montecarlo_samples = 50
fragility_model = SPO2IDA_procedure.calculate_fragility(capacity_curves, idealised_capacity, damage_model, montecarlo_samples, Sa_ratios, 1)
"""
Explanation: Calculate fragility functions
The damage threshold dispersion is calculated and integrated with the record-to-record dispersion through Monte Carlo simulations. Please enter the number of Monte Carlo samples to be performed using the parameter montecarlo_samples in the cell below.
End of explanation
"""
# Intensity-measure range over which the fragility curves are plotted.
minIML, maxIML = 0.01, 2
utils.plot_fragility_model(fragility_model, minIML, maxIML)
# Bug fix: use the print() function. The original bare statement
# "print fragility_model" is a SyntaxError under Python 3, while the
# call form works under both Python 2 and Python 3.
print(fragility_model)
"""
Explanation: Plot fragility functions
The following parameters need to be defined in the cell below in order to plot the lognormal CDF fragility curves obtained above:
* minIML and maxIML: These parameters define the limits of the intensity measure level for plotting the functions
End of explanation
"""
# Save the mean fragility model; output_type may be "csv" or "nrml".
taxonomy = "RC"
minIML, maxIML = 0.01, 2.00
output_type = "csv"
output_path = "../../../../../../rmtk_data/output/"
utils.save_mean_fragility(taxonomy, fragility_model, minIML, maxIML, output_type, output_path)
"""
Explanation: Save fragility functions
The derived parametric fragility functions can be saved to a file in either CSV format or in the NRML format that is used by all OpenQuake input models. The following parameters need to be defined in the cell below in order to save the lognormal CDF fragility curves obtained above:
1. taxonomy: This parameter specifies a taxonomy string for the the fragility functions.
2. minIML and maxIML: These parameters define the bounds of applicability of the functions.
3. output_type: This parameter specifies the file format to be used for saving the functions. Currently, the formats supported are "csv" and "nrml".
End of explanation
"""
# Combine fragility with a consequence model to obtain loss-ratio
# distributions at the listed intensity measure levels.
cons_model_file = "../../../../../../rmtk_data/cons_model.csv"
imls = [0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50,
        0.60, 0.70, 0.80, 0.90, 1.00, 1.20, 1.40, 1.60, 1.80, 2.00]
# Supported distributions: "lognormal", "beta", "PMF".
distribution_type = "lognormal"
cons_model = utils.read_consequence_model(cons_model_file)
vulnerability_model = utils.convert_fragility_vulnerability(fragility_model, cons_model,
                                                           imls, distribution_type)
"""
Explanation: Obtain vulnerability function
A vulnerability model can be derived by combining the set of fragility functions obtained above with a consequence model. In this process, the fractions of buildings in each damage state are multiplied by the associated damage ratio from the consequence model, in order to obtain a distribution of loss ratio for each intensity measure level.
The following parameters need to be defined in the cell below in order to calculate vulnerability functions using the above derived fragility functions:
1. cons_model_file: This parameter specifies the path of the consequence model file.
2. imls: This parameter specifies a list of intensity measure levels in increasing order at which the distribution of loss ratios are required to be calculated.
3. distribution_type: This parameter specifies the type of distribution to be used for calculating the vulnerability function. The distribution types currently supported are "lognormal", "beta", and "PMF".
End of explanation
"""
utils.plot_vulnerability_model(vulnerability_model)
"""
Explanation: Plot vulnerability function
End of explanation
"""
# Persist the vulnerability function; output_type may be "csv" or "nrml".
taxonomy = "RC"
output_type = "nrml"
output_path = "../../../../../../rmtk_data/output/"
utils.save_vulnerability(taxonomy, vulnerability_model, output_type, output_path)
"""
Explanation: Save vulnerability function
The derived parametric or nonparametric vulnerability function can be saved to a file in either CSV format or in the NRML format that is used by all OpenQuake input models. The following parameters need to be defined in the cell below in order to save the lognormal CDF fragility curves obtained above:
1. taxonomy: This parameter specifies a taxonomy string for the the fragility functions.
3. output_type: This parameter specifies the file format to be used for saving the functions. Currently, the formats supported are "csv" and "nrml".
End of explanation
"""
|
IS-ENES-Data/submission_forms | test/Templates/.ipynb_checkpoints/CMIP6_submission_form-checkpoint.ipynb | apache-2.0 | from dkrz_forms import form_widgets
form_widgets.show_status('form-submission')
"""
Explanation: DKRZ CMIP6 submission form for ESGF data publication
General Information (to be completed based on official CMIP6 references)
Data to be submitted for ESGF data publication must follow the rules outlined in the CMIP6 Archive Design <br /> (https://...)
Thus file names have to follow the pattern:<br />
VariableName_Domain_GCMModelName_CMIP6ExperimentName_CMIP5EnsembleMember_RCMModelName_RCMVersionID_Frequency[_StartTime-EndTime].nc <br />
Example: tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc
The directory structure in which these files are stored follow the pattern:<br />
activity/product/Domain/Institution/
GCMModelName/CMIP5ExperimentName/CMIP5EnsembleMember/
RCMModelName/RCMVersionID/Frequency/VariableName <br />
Example: CORDEX/output/AFR-44/MPI-CSC/MPI-M-MPI-ESM-LR/rcp26/r1i1p1/MPI-CSC-REMO2009/v1/mon/tas/tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc
Notice: If your model is not yet registered, please contact ....
This 'data submission form' is used to improve initial information exchange between data providers and the data center. The form has to be filled before the publication process can be started. In case you have questions please contact the individual data center:
o DKRZ: cmip6@dkrz.de
End of explanation
"""
# initialize your CORDEX submission form template
from dkrz_forms import form_handler
from dkrz_forms import checks  # NOTE(review): `checks` is imported but not used in this notebook -- confirm
"""
Explanation: Start submission procedure
The submission is based on this interactive document consisting of "cells" you can modify and then evaluate
evaluation of cells is done by selecting the cell and then press the keys "Shift" + "Enter"
<br /> please evaluate the following cell to initialize your form
End of explanation
"""
# Contact details of the submitter; the keyword identifies this submission
# and, together with the last name, forms the saved-form name.
my_email = "..." # example: sf.email = "Mr.Mitty@yahoo.com"
my_first_name = "..." # example: sf.first_name = "Harold"
my_last_name = "..." # example: sf.last_name = "Mitty"
my_keyword = "..." # example: sf.keyword = "mymodel_myrunid"
sf = form_handler.init_form("CORDEX",my_first_name,my_last_name,my_email,my_keyword)
"""
Explanation: please provide information on the contact person for this CORDEX data submission request
End of explanation
"""
sf.submission_type = "..." # example: sf.submission_type = "initial_version"
"""
Explanation: Type of submission
please specify the type of this data submission:
- "initial_version" for first submission of data
- "new _version" for a re-submission of previousliy submitted data
- "retract" for the request to retract previously submitted data
End of explanation
"""
sf.institution = "..." # example: sf.institution = "Alfred Wegener Institute"
"""
Explanation: Requested general information
... to be finalized as soon as CMIP6 specification is finalized ....
Please provide model and institution info as well as an example of a file name
institution
The value of this field has to equal the value of the optional NetCDF attribute 'institution'
(long version) in the data files if the latter is used.
End of explanation
"""
sf.institute_id = "..." # example: sf.institute_id = "AWI"
"""
Explanation: institute_id
The value of this field has to equal the value of the global NetCDF attribute 'institute_id'
in the data files and must equal the 4th directory level. It is needed before the publication
process is started in order that the value can be added to the relevant CORDEX list of CV1
if not yet there. Note that 'institute_id' has to be the first part of 'model_id'
End of explanation
"""
sf.model_id = "..." # example: sf.model_id = "AWI-HIRHAM5"
"""
Explanation: model_id
The value of this field has to be the value of the global NetCDF attribute 'model_id'
in the data files. It is needed before the publication process is started in order that
the value can be added to the relevant CORDEX list of CV1 if not yet there.
Note that it must be composed by the 'institute_id' follwed by the RCM CORDEX model name,
separated by a dash. It is part of the file name and the directory structure.
End of explanation
"""
# Experiment id(s) and the covered time period(s); lists are allowed for
# multiple experiments.
sf.experiment_id = "..." # example: sf.experiment_id = "evaluation"
# ["value_a","value_b"] in case of multiple experiments
sf.time_period = "..." # example: sf.time_period = "197901-201412"
# ["time_period_a","time_period_b"] in case of multiple values
"""
Explanation: experiment_id and time_period
Experiment has to equal the value of the global NetCDF attribute 'experiment_id'
in the data files. Time_period gives the period of data for which the publication
request is submitted. If you intend to submit data from multiple experiments you may
add one line for each additional experiment or send in additional publication request sheets.
End of explanation
"""
# Example file name used to check the DRS file-name structure.
sf.example_file_name = "..." # example: sf.example_file_name = "tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc"
# Please run this cell as it is to check your example file name structure
# to_do: implement submission_form_check_file function - output result (attributes + check_result)
form_handler.cordex_file_info(sf,sf.example_file_name)
"""
Explanation: Example file name
Please provide an example file name of a file in your data collection,
this name will be used to derive the other
End of explanation
"""
sf.grid_mapping_name = "..." # example: sf.grid_mapping_name = "rotated_latitude_longitude"
"""
Explanation: information on the grid_mapping
the NetCDF/CF name of the data grid ('rotated_latitude_longitude', 'lambert_conformal_conic', etc.),
i.e. either that of the native model grid, or 'latitude_longitude' for the regular -XXi grids
End of explanation
"""
sf.grid_as_specified_if_rotated_pole = "..." # example: sf.grid_as_specified_if_rotated_pole = "yes"
"""
Explanation: Does the grid configuration exactly follow the specifications in ADD2 (Table 1)
in case the native grid is 'rotated_pole'? If not, comment on the differences; otherwise write 'yes' or 'N/A'. If the data is not delivered on the computational grid it has to be noted here as well.
End of explanation
"""
# QC status: 'no', 'QC1', 'QC2-all', 'QC2-CORDEX', or 'other' (+ comment).
sf.data_qc_status = "..." # example: sf.data_qc_status = "QC2-CORDEX"
sf.data_qc_comment = "..." # any comment of quality status of the files
"""
Explanation: Please provide information on quality check performed on the data you plan to submit
Please answer 'no', 'QC1', 'QC2-all', 'QC2-CORDEX', or 'other'.
'QC1' refers to the compliancy checker that can be downloaded at http://cordex.dmi.dk.
'QC2' refers to the quality checker developed at DKRZ.
If your answer is 'other' give some informations.
End of explanation
"""
sf.terms_of_use = "..." # example: sf.terms_of_use = "unrestricted"
"""
Explanation: Terms of use
Please give the terms of use that shall be asigned to the data.
The options are 'unrestricted' and 'non-commercial only'.
For the full text 'Terms of Use' of CORDEX data refer to
http://cordex.dmi.dk/joomla/images/CORDEX/cordex_terms_of_use.pdf
End of explanation
"""
sf.directory_structure = "..." # example: sf.directory_structure = "compliant"
"""
Explanation: Information on directory structure and data access path
(and other information needed for data transport and data publication)
If there is any directory structure deviation from the CORDEX standard please specify here.
Otherwise enter 'compliant'. Please note that deviations MAY imply that data can not be accepted.
End of explanation
"""
# Location of the data (host:path), or "N/A" with access details below.
sf.data_path = "..." # example: sf.data_path = "mistral.dkrz.de:/mnt/lustre01/work/bm0021/k204016/CORDEX/archive/"
sf.data_information = "..." # ...any info where data can be accessed and transfered to the data center ... "
"""
Explanation: Give the path where the data reside, for example:
blizzard.dkrz.de:/scratch/b/b364034/. If not applicable write N/A and give data access information in the data_information string
End of explanation
"""
sf.exclude_variables_list = "..." # example: sf.exclude_variables_list=["bnds", "vertices"]
"""
Explanation: Exclude variable list
In each CORDEX file there may be only one variable which shall be published and searchable at the ESGF portal (target variable). In order to facilitate publication, all non-target variables are included in a list used by the publisher to avoid publication. A list of known non-target variables is [time, time_bnds, lon, lat, rlon ,rlat ,x ,y ,z ,height, plev, Lambert_Conformal, rotated_pole]. Please enter other variables into the left field if applicable (e.g. grid description variables), otherwise write 'N/A'.
End of explanation
"""
sf.uniqueness_of_tracking_id = "..." # example: sf.uniqueness_of_tracking_id = "yes"
"""
Explanation: Uniqueness of tracking_id and creation_date
In case any of your files is replacing a file already published, it must not have the same tracking_id nor
the same creation_date as the file it replaces.
Did you make sure that that this is not the case ?
Reply 'yes'; otherwise adapt the new file versions.
End of explanation
"""
# Submitted variables per output frequency (daily, monthly, seasonal, fixed).
# Remove entries that are not provided.
sf.variable_list_day = [
    "clh","clivi","cll","clm","clt","clwvi",
    "evspsbl","evspsblpot",
    "hfls","hfss","hurs","huss","hus850",
    "mrfso","mrro","mrros","mrso",
    "pr","prc","prhmax","prsn","prw","ps","psl",
    "rlds","rlus","rlut","rsds","rsdt","rsus","rsut",
    "sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund",
    "tas","tasmax","tasmin","tauu","tauv","ta200","ta500","ta850","ts",
    "uas","ua200","ua500","ua850",
    "vas","va200","va500","va850","wsgsmax",
    "zg200","zg500","zmla"
    ]
sf.variable_list_mon = [
    "clt",
    "evspsbl",
    "hfls","hfss","hurs","huss","hus850",
    "mrfso","mrro","mrros","mrso",
    "pr","psl",
    "rlds","rlus","rlut","rsds","rsdt","rsus","rsut",
    "sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund",
    "tas","tasmax","tasmin","ta200",
    "ta500","ta850",
    "uas","ua200","ua500","ua850",
    "vas","va200","va500","va850",
    "zg200","zg500"
    ]
sf.variable_list_sem = [
    "clt",
    "evspsbl",
    "hfls","hfss","hurs","huss","hus850",
    "mrfso","mrro","mrros","mrso",
    "pr","psl",
    "rlds","rlus","rlut","rsds","rsdt","rsus","rsut",
    "sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund",
    "tas","tasmax","tasmin","ta200","ta500","ta850",
    "uas","ua200","ua500","ua850",
    "vas","va200","va500","va850",
    "zg200","zg500"
    ]
sf.variable_list_fx = [
    "areacella",
    "mrsofc",
    "orog",
    "rootd",
    "sftgif","sftlf"
    ]
"""
Explanation: Variable list
list of variables submitted -- please remove the ones you do not provide:
End of explanation
"""
# simple consistency check report for your submission form
res = form_handler.check_submission(sf)
sf.sub['status_flag_validity'] = res['valid_submission']
# Render the check results as a table in the notebook.
form_handler.DictTable(res)
"""
Explanation: Check your submission before submission
End of explanation
"""
# Persist the form (name = last name + keyword).
form_handler.form_save(sf)
#evaluate this cell if you want a reference to the saved form emailed to you
# (only available if you access this form via the DKRZ form hosting service)
# NOTE(review): this call passes no form object while the one below passes
# sf -- confirm whether email_form_info() without arguments is intended.
form_handler.email_form_info()
# evaluate this cell if you want a reference (provided by email)
# (only available if you access this form via the DKRZ hosting service)
form_handler.email_form_info(sf)
"""
Explanation: Save your form
your form will be stored (the form name consists of your last name plus your keyword)
End of explanation
"""
# Email a reference to the form, then formally submit it to the DKRZ team.
form_handler.email_form_info(sf)
form_handler.form_submission(sf)
"""
Explanation: officially submit your form
the form will be submitted to the DKRZ team to process
you also receive a confirmation email with a reference to your online form for future modifications
End of explanation
"""
|
brentjm/Impurity-Predictions | notebooks/temp.ipynb | bsd-2-clause | import numpy as np
import pandas as pd
import argparse as ap
def mass_density_sat(T):
    """
    Mass of water in one cubic meter of air at one bar at temperature T
    (polynomial fit of the saturation mass density of water vapor).
    parameters:
        T: float - Temperature (K)
    returns float - mass of water in one cubic meter saturated air (kg/m^3)
    """
    # Bug fix: original read "1.6124*e-5", multiplying by an undefined
    # name `e`; the quadratic coefficient is 1.6124e-5, consistent with
    # the scientific notation of the neighboring terms.
    return 5.079e-3 + 2.5547e-4*T + 1.6124e-5*T**2 + 3.6608e-9*T**3 + 3.9911e-9*T**4
def mass_water_vapor(T, rh, V):
    """
    Calculate the total mass of water in vapor in air at a
    specified rh and a given size container.
    parameters
        T: float - Temperature (K)
        rh: float - relative humidity (%)
        V: float - volume of vapor (m^3)
    returns float - mass of water vapor (kg)
    NOTE(review): rh is documented in percent but is used directly as a
    multiplier (no division by 100), so callers appear to need a fraction
    in [0, 1] -- TODO confirm the intended units.
    """
    # Saturation density scaled by relative humidity and vapor volume.
    return mass_density_sat(T) * rh * V
def GAB(aw, wm, K, C):
    """
    Calculate the water content of a substance based on the Guggenheim, Anderson and de Boer (GAB) three-parameter
    isotherm model. See "GAB Generalized Equation for Sorption Phenomena", Food and Bioprocess Technology
    March 2008 Vol 1, Issue 1, pp 82--90
        w = (wm*C*K*aw) / ((1-K*aw)*(1-K*aw+C*K*aw))
    parameters:
        aw: float - water activity
        wm: float - GAB parameter
        K: float - GAB parameter
        C: float - GAB parameter
    returns float - water content of substance (mass water substance (kg) / mass substance (kg))
    """
    # Bug fix: the last factor used an undefined lowercase `c`; the GAB
    # parameter is `C`, as in the docstring formula.
    return (wm * C * K * aw) / ((1 - K * aw) * (1 - K * aw + C * K * aw))
def mass_water_solid_mixture(aw, frac, params):
    """
    Water content of a solid mixture at a given water activity, computed
    as the mass-fraction-weighted superposition of per-component GAB
    isotherms.
    parameters:
        aw: float - water activity
        frac: list of floats - mass fractions of the components [f_i]; i=1...N
        params: list of dictionaries - GAB parameters [{wm, C, K}_i]; i=1...N
    returns float - mass of water in solid (kg)
    """
    contributions = [frac[i] * GAB(aw, p["wm"], p["K"], p["C"])
                     for i, p in enumerate(params)]
    return np.sum(contributions)
def GAB_regress(aw, w):
    """
    Calculate the GAB parameters from water content - humidity measurements.
    See "GAB Generalized Equation for Sorption Phenomena", Food and Bioprocess Technology
    March 2008 Vol 1, Issue 1, pp 82--90
    The GAB isotherm linearizes to a quadratic in aw:
        aw/w = a + b*aw + c*aw^2
    with
        a = 1/(wm*C*K)
        b = (C-2)/(wm*C)
        c = K*(1-C)/(wm*C)
    so a degree-2 fit of aw/w against aw recovers (a, b, c), and the GAB
    parameters follow algebraically.
    parameters
        aw: array - water activity
        w: array - water content at each water activity point
    returns dict - {"wm": float, "C": float, "K": float}
    """
    y = aw / w
    # np.polyfit returns coefficients highest degree first: [c, b, a].
    [c, b, a] = np.polyfit(aw, y, 2)
    # K is the positive root of a*K^2 + b*K + c = 0 (the other root,
    # K*(1-C), is negative for C > 1).
    K = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
    C = b / (a*K) + 2
    wm = 1 / (b + 2*K*a)
    # Bug fix: the original returned {wm: wm, C: C, K: K}, which used the
    # fitted *values* as dictionary keys instead of the parameter names.
    return {"wm": wm, "C": C, "K": K}
def average_GAB(frac, params):
    """
    Calculate effective GAB parameters for a solid mixture by sampling the
    superposed mixture isotherm over a range of water activities and then
    regressing a single GAB model onto the sampled curve.
    parameters:
        frac: list of floats - list of mass fractions of the individual components [f_i] ; for i=1...N
        params: list of dictionaries - list of GAB parameters [{wm, C, K}_i] ; for i=1...N
    returns dict - GAB parameters {wm, C, K}
    """
    # Sample the mixture isotherm on a grid of water activities ...
    aw = np.arange(.1,.9,.05)
    w = np.array([mass_water_solid_mixture(aw_i, frac, params) for aw_i in aw])
    # ... then fit one GAB model to the sampled curve.
    return GAB_regress(aw, w)
def water_activity_GAB(w, wm, C, K):
    """
    Calculate the water activity, aw, from the known water content, w, in a
    substance and known GAB parameters, wm, C, K.
    From "GAB Generalized Equation for Sorption Phenomena", Food and
    Bioprocess Technology, March 2008, Vol 1, Issue 1, pp 82--90.
    Inverting the GAB isotherm
        w = (wm*C*K*aw) / ((1-K*aw)*(1-K*aw+C*K*aw))
    with x = K*aw gives the quadratic
        w*(C-1)*x**2 + (wm*C - w*(C-2))*x - w = 0
    whose positive (physical) root simplifies to
        aw = (sqrt(C)*sqrt(C*wm**2 - 2*C*wm*w + C*w**2 + 4*wm*w)
              - C*wm + C*w - 2*w) / (2*(C-1)*K*w)
    parameters
        w: float - water content in component (mass water / mass component)
        wm: float - GAB parameter
        C: float - GAB parameter
        K: float - GAB parameter (assumes C != 1 and w > 0)
    returns float - water activity
    """
    # Bug fixes vs. the original:
    #  * the terms "- C*wm + C*w - 2*w" were placed *inside* the square
    #    root; per the derivation above they belong outside it;
    #  * an (unused) intermediate root divided by 2*C instead of 2*c.
    # The unused quadratic-root intermediates (hi/lo) were removed; this
    # closed form is the positive root of the same quadratic.
    disc = np.sqrt(C) * np.sqrt(C*wm**2 - 2*C*wm*w + C*w**2 + 4*wm*w)
    aw = (disc - C*wm + C*w - 2*w) / (2*(C-1)*K*w)
    return aw
excipients = GAB.co
"""
Explanation: Notebook to calculate the water permeation into packaging.
End of explanation
"""
# Simulation configuration for the packaging system: an inner and an outer
# package, each with a desiccant and a product, plus ambient conditions and
# a list of user events.
# NOTE(review): the numeric values below look like placeholders (0, 10, .2)
# -- TODO fill in real permeabilities and GAB parameters.
system = {
    # Simulation time window.
    "time": {
        "startDate": 0,
        "endDate": 0
    },
    # Ambient storage conditions.
    "conditions": {
        "temperature": 40,
        "rh": 50
    },
    # Inner package.
    "inner": {
        "type": "PP",
        "permeability": 0,
        "volume": 10,
        "desiccant": {
            "type": "silica",
            "mass": 10,
            "GAB": {"wm": 0, "C": 0, "K": 0},
        },
        "product": {
            "units": 10,
            "unit_mass": 1,
            "components": [{
                "name": "component_A",
                "frac": .2,
                "GAB": {"wm": 0, "C":0, "K":0}}
            ]
        }
    },
    # Outer package.
    "outer": {
        "type": "PP",
        "permeability": 0,
        "volume": 10,
        "desiccant": {
            "type": "silica",
            "mass": 10,
            "GAB": {"wm": 0, "C": 0, "K": 0},
        },
        "product": {
            "units": 10,
            "unit_mass": 1,
            "components": [{
                "name": "component_A",
                "frac": .2,
                "GAB": {"wm": 0, "C":0, "K":0}
            }]
        }
    },
    # User events (e.g. opening the package, removing desiccant).
    "events": [{
        "date": 0,
        "event": "REMOVE_DESICCANT",
        "mass": 0,
        "desiccantWater": 0
    }]
}
"""
Explanation: Define the simulation parameters
End of explanation
"""
def flux(P, rh_in, rh_out):
    """
    Water flux between the inner and outer package (work in progress).

    NOTE(review): this function is incomplete -- it references undefined
    names (T, rh, V, aw, m, w), calls `mass_water_solid` which is not
    defined (presumably `mass_water_solid_mixture` was intended), never
    uses its parameters P/rh_in/rh_out, and returns nothing. TODO finish.
    """
    # Inner package water
    w_inner = mass_water_vapor(T, rh, V) + mass_water_solid(aw, m, w) + mass_water_solid(aw, m, w)
    # Outer package water
    w_outer = mass_water_vapor(T, rh, V) + mass_water_solid(aw, m, w) + mass_water_solid(aw, m, w)
"""
Explanation: Loop through user events
Calculate the water flux between two points in time that have no user changes
End of explanation
"""
|
nwilbert/async-examples | notebook/generators.ipynb | mit | class TestIterator:
def __init__(self, max_value):
self._current_value = 0
self._max_value = max_value
def __next__(self):
self._current_value += 1
if self._current_value > self._max_value:
raise StopIteration()
return self._current_value
"""
Explanation: This is code for Python versions >= 3.3.
Iterables and Iterators in Python
Iterators
Iterator objects in Python provide a __next__ method. If the iteration has reached the end this is signaled by raising a StopIteration exception.
End of explanation
"""
# Drive the iterator by hand with the builtin next(); the iterator
# signals exhaustion by raising StopIteration.
iterator = TestIterator(3)
try:
    while True:
        print(next(iterator))
except StopIteration:
    pass
"""
Explanation: When you perform the iteration manually you should use the builtin next function to call the magic __next__ method.
End of explanation
"""
for i in TestIterator(3):
print(i)
"""
Explanation: Of course you can also use a standard for-loop. However, the for-loop actually expects to be given a so called iterable object, not an iterator.
End of explanation
"""
list(TestIterator(3))
"""
Explanation: The same is the case for list constructors.
End of explanation
"""
class TestIterable:
    """Iterable that hands out a fresh TestIterator on every __iter__ call."""
    def __init__(self, max_value):
        self._max_value = max_value
    def __iter__(self):
        # A new iterator per call, so the iterable can be iterated repeatedly.
        return TestIterator(self._max_value)
"""
Explanation: Iterables
Iterables are defined by having an __iter__ method that return an iterator.
End of explanation
"""
for i in TestIterable(3):
print(i)
"""
Explanation: Now we can finally use the standard for-loop:
End of explanation
"""
for i in [1, 2, 3]:
print(i)
"""
Explanation: This is convenient, because all the standard container classes are iterable. So you can directly put them into a for-loop or list constructor, without first having to manually create an iterator first.
End of explanation
"""
test_iterable = TestIterable(3)
test_iterator = iter(test_iterable)
print(test_iterable)
"""
Explanation: Usually one therefore does not have to use the __iter__ method manually. But if you do, use the builtin iter function instead.
End of explanation
"""
class RealTestIterator(TestIterator):
    """TestIterator that is itself iterable, as the iterator protocol requires."""
    def __iter__(self):
        # Iterators return themselves so they work directly in for-loops.
        return self
"""
Explanation: It would be anoying (and quite surprising) to not be able to use iterators with for-loops. Therefore iterators in Python must include an __iter__ method as well, returning the iterator itself.
End of explanation
"""
for i in RealTestIterator(3):
print(i)
"""
Explanation: We can now use this iterator as expected. When the for-loop applies the iter function this works and has no effect on the iterator.
End of explanation
"""
iterator = RealTestIterator(3)
for i in iterator:
print(i)
for i in iterator:
# iterator directly raises StopIteration, so this is never reached
print(i)
"""
Explanation: But there is an important semantic difference between the __iter__ of iterables and iterators: iterables provide a fresh iterator object on each call and can therefore be iterated over multiple times. Iterators on the other hand are spent after the first iteration.
End of explanation
"""
def is_iterator(it):
    """Return True when iter() hands back the same object (iterator semantics)."""
    candidate = iter(it)
    return candidate is it
print(is_iterator(RealTestIterator(3)))
print(is_iterator(TestIterable(3)))
"""
Explanation: This can cause subtle bugs and is actually a nice example for the pitfalls of duck typing. One possible way to safeguard against this is by testing the semantics of __iter__:
End of explanation
"""
def test():
    """Generator producing 1 then 2."""
    for value in (1, 2):
        yield value
print(test)
print(test())
"""
Explanation: Generators
Generator Basics in Python
Every function that contains a yield keyword is a generator function. A generator function returns a generator object, which is a special case of an iterator (i.e., an object with a __next__ method and an __iter__ method that returns self).
End of explanation
"""
t = test()
try:
while True:
print(next(t))
except StopIteration:
print('done')
"""
Explanation: The iteration can be performed using the standard iterator API.
End of explanation
"""
for i in test():
print(i)
"""
Explanation: A generator object can be used anywhere an iterator is supported, e.g., for loops.
End of explanation
"""
def test():
    """Coroutine: yields 1, then yields the square of whatever is sent in."""
    received = yield 1
    yield received ** 2
t = test()
print(next(t)) # go to the first yield
print(t.send(3))
"""
Explanation: Generators as Coroutines
Python 2.5 added the ability to not only get data from a generator, but also to send data to it. yield turned from a statement into an expression. Functions that use this feature are called coroutines.
End of explanation
"""
def test():
    """Generator producing 1 then 2."""
    for value in (1, 2):
        yield value

def wrapper():
    """Forward every value produced by test(), one by one."""
    inner = test()
    for value in inner:
        yield value
for i in wrapper():
print(i)
"""
Explanation: Note that next(t) is equivalent to t.send(None).
Forwarding an iterator is easy:
End of explanation
"""
def test():
    """Coroutine: yield 1, then yield the square of the value sent in."""
    sent = yield 1
    yield sent ** 2

def wrapper():
    """Delegate transparently to test(), including send()."""
    yield from test()
w = wrapper()
print(next(w))
print(w.send(3))
"""
Explanation: Doing the same with a coroutine on the other hand is quite hard (see PEP 380), so Python 3.3 introduced yield from.
yield from
Wrapping/forwarding coroutines with yield from is easy. This is, for example, important if you want to refactor a coroutine by extracting a sub-coroutine.
End of explanation
"""
def test():
    """Yield 0, 1, 2 and finish with StopIteration carrying the value 'done'."""
    n = 0
    while n < 3:
        yield n
        n += 1
    return 'done'
for i in test():
print(i)
t = test()
try:
while True:
print(next(t))
except StopIteration as e:
print(e.value)
"""
Explanation: The same PEP also introduced return statements in coroutines, to transport a return value via StopIteration.
End of explanation
"""
def wrapper():
    """Delegate to test(); `yield from` evaluates to the inner return value."""
    value = yield from test()
    print('wrapper got:', value)
    return 'wrapper done'
for i in wrapper():
print(i)
"""
Explanation: The return value also becomes the value of yield from:
End of explanation
"""
[xy for xy in range(3)]
xy
"""
Explanation: So yield from transparently pipes through the iterations and provides the end result value.
More random info about Generators
Yield and List Comprehensions (or Generator Expressions)
In older versions of Python the variables in list comprehensions would leak out. In Python 3 this is no longer the case:
End of explanation
"""
(xy for xy in range(3))
xy
"""
Explanation: List comprehensions now have their own execution context, just like functions and generator expressions.
End of explanation
"""
[i for i in range(3) if (yield i)]
"""
Explanation: A side effect of this is that a yield statement in some parts of a list comprehension causes it to evaluate to a generator object. (Note: since Python 3.8, using yield inside a comprehension is a SyntaxError, so these examples only run on older interpreters.)
End of explanation
"""
set([i**2 for i in range(3) if (yield i)])
set([(yield i**2) for i in range(3)])
"""
Explanation: This can be surprising at first.
End of explanation
"""
def g():
return [i for i in (yield range(3))]
next(g())
"""
Explanation: Only the expression list part is not affected by this. A yield statement in this part of the list comprehension works as normally expected (i.e., it refers to the surrounding generator function).
End of explanation
"""
set(i**2 for i in range(3) if (yield i))
"""
Explanation: Generator expressions have always behaved like described above (since they are executed lazily they always had to store their context).
End of explanation
"""
{i**2 for i in range(3) if (yield i)}
{i: i**2 for i in range(3) if (yield i)}
"""
Explanation: Set and Dict comprehensions of course act like just list comprehensions.
End of explanation
"""
[i for i in range(3) if (yield from i)]
set([i for i in range(3) if (yield from i)])
"""
Explanation: With yield from we get the same behavior as with yield.
End of explanation
"""
import unittest.mock as mock
# Mock with only two queued side effects; the third call raises StopIteration.
m = mock.Mock(side_effect=[1, 2])
def test():
    yield m()
    yield m()
    yield m()
# NOTE(review): since PEP 479 (Python 3.7), a StopIteration raised inside a
# generator is converted to RuntimeError, so on modern Python this loop
# errors out instead of silently truncating as described in the text.
for i in test():
    print(i)
"""
Explanation: Beware of StopIteration
A generator can be exited explicity by raising StopIteration. Unfortunately it doesn't matter from where this is raised. It might come from another iteration inside a nested function that is not caught properly.
End of explanation
"""
def test():
    """Count upward from 1; print 'done' when closed, then 'bye'."""
    counter = 1
    try:
        while True:
            yield counter
            counter += 1
    except GeneratorExit:
        # Raised at the suspended yield when close() is called.
        print('done')
    print('bye')
t = test()
print(next(t))
print(next(t))
t.close()
try:
print(next(t))
except StopIteration:
print('no more values')
"""
Explanation: So a simple error in setting up your mocks can silently cause an unexpected early termination of your asynchronous test code!
GeneratorExit, close and throw
As a counterpart to StopIteration you can signal a generator from the outside that it should finish. This is done by calling close() on the generator, which will raise a GeneratorExit exception.
End of explanation
"""
def test():
    """Endless counter: yields 1, 2, 3, ... until closed."""
    value = 1
    while True:
        yield value
        value = value + 1
t = test()
print(next(t))
print(next(t))
t.close()
try:
print(next(t))
except StopIteration:
print('no more values')
"""
Explanation: Catching the GeneratorExit is not really necessary here. But if the generator has any resources that need cleanup then one can use a try ... finally or a context manager to perform this.
End of explanation
"""
def test():
    """Counter that (incorrectly) tries to yield again after GeneratorExit."""
    try:
        i = 1
        while True:
            yield i
            i += 1
    except GeneratorExit:
        print('done')
    # Yielding after GeneratorExit is not allowed: close() raises
    # RuntimeError("generator ignored GeneratorExit") because of this line.
    yield 'just one more value'
t = test()
print(next(t))
print(next(t))
t.close()
"""
Explanation: Yielding values after the exception was raised is not supported.
End of explanation
"""
def test():
    """Counter catching a manually thrown GeneratorExit, then yielding more.

    Unlike close(), throw(GeneratorExit()) does not forbid further yields:
    the thrown exception is caught below and the generator keeps producing.
    """
    try:
        i = 1
        while True:
            yield i
            i += 1
    except GeneratorExit:
        print('done')
    yield 'one more value'
    yield 'and another one'
t = test()
print(next(t))
print(next(t))
print(t.throw(GeneratorExit()))
print(next(t))
"""
Explanation: Note that throwing the GeneratorExit exception manually does not have the same effect as calling close.
End of explanation
"""
|
zingale/hydro_examples | compressible/euler.ipynb | bsd-3-clause | from sympy.abc import rho
rho, u, c = symbols('rho u c')
A = Matrix([[u, rho, 0], [0, u, rho**-1], [0, c**2 * rho, u]])
A
"""
Explanation: Euler Equations
The Euler equations in primitive variable form, $q = (\rho, u, p)^\intercal$ appear as:
$$q_t + A(q) q_x = 0$$
with the matrix $A(q)$:
$$A(q) = \left ( \begin{array}{ccc} u & \rho & 0 \
0 & u & 1/\rho \
0 & \gamma p & u \end{array} \right )
$$
The sound speed is related to the adiabatic index, $\gamma$, as $c^2 = \gamma p /\rho$.
We can represent this matrix symbolically in SymPy and explore its eigensystem.
End of explanation
"""
A.eigenvals()
"""
Explanation: The eigenvalues are the speeds at which information propagates with. SymPy returns them as a
dictionary, giving the multiplicity for each eigenvalue.
End of explanation
"""
R = A.eigenvects() # this returns a tuple for each eigenvector with multiplicity -- unpack it
r = []
lam = []
for (ev, _, rtmp) in R:
r.append(rtmp[0])
lam.append(ev)
# we can normalize them anyway we want, so let's make the first entry 1
for n in range(len(r)):
v = r[n]
r[n] = v/v[0]
"""
Explanation: The right eigenvectors are what SymPy gives natively. For a given eigenvalue, $\lambda$, these
satisfy:
$$A r = \lambda r$$
Right Eigenvectors
End of explanation
"""
r[0]
"""
Explanation: 0-th right eigenvector
End of explanation
"""
lam[0]
"""
Explanation: this corresponds to the eigenvalue
End of explanation
"""
r[1]
"""
Explanation: 1-st right eigenvector
End of explanation
"""
lam[1]
"""
Explanation: this corresponds to the eigenvalue
End of explanation
"""
r[2]
"""
Explanation: 2-nd right eigenvector
End of explanation
"""
lam[2]
"""
Explanation: this corresponds to the eigenvalue
End of explanation
"""
R = zeros(3,3)
R[:,0] = r[1]
R[:,1] = r[0]
R[:,2] = r[2]
R
"""
Explanation: Here they are as a matrix, $R$, in order from smallest to largest eigenvalue
End of explanation
"""
B = A.transpose()
B
L = B.eigenvects()
l = []
laml = []
for (ev, _, ltmp) in L:
l.append(ltmp[0].transpose())
laml.append(ev)
"""
Explanation: Left Eigenvectors
The left eigenvectors satisfy:
$$l A = \lambda l$$
SymPy doesn't have a method to get left eigenvectors directly, so we take the transpose of this expression:
$$(l A)^\intercal = A^\intercal l^\intercal = \lambda l^\intercal$$
Therefore, the transpose of the left eigenvectors, $l^\intercal$, are the right eigenvectors of transpose of $A$
End of explanation
"""
for n in range(len(l)):
if lam[n] == laml[n]:
ltmp = l[n]
p = ltmp.dot(r[n])
l[n] = ltmp/p
"""
Explanation: Traditionally, we normalize these such that $l^{(\mu)} \cdot r^{(\nu)} = \delta_{\mu\nu}$
End of explanation
"""
l[0]
"""
Explanation: 0-th left eigenvector
End of explanation
"""
l[1]
"""
Explanation: 1-st left eigenvector
End of explanation
"""
l[2]
"""
Explanation: 2-nd left eigenvector
End of explanation
"""
ps = symbols('p_s')
As = Matrix([[u, rho, 0], [c**2/rho, u, ps/rho], [0, 0, u]])
As
As.eigenvals()
R = As.eigenvects() # this returns a tuple for each eigenvector with multiplicity -- unpack it
r = []
lam = []
for (ev, _, rtmp) in R:
r.append(rtmp[0])
lam.append(ev)
# we can normalize them anyway we want, so let's make the first entry 1
for n in range(len(r)):
v = r[n]
r[n] = v/v[0]
r[0], lam[0]
r[1], lam[1]
r[2], lam[2]
"""
Explanation: Entropy formulation
here we write the system in terms of $q_s = (\rho, u, s)^\intercal$, where the system is
$${q_s}_t + A_s(q_s) {q_s}_x = 0$$
and
$$
A_s = \left (\begin{matrix}u & \rho & 0\
\frac{c^{2}}{\rho} & u & \frac{p_{s}}{\rho}\
0 & 0 & u\end{matrix}\right)
$$
End of explanation
"""
Bs = As.transpose()
# Bug fix: the left eigenvectors of As must come from its transpose Bs;
# the original called B.eigenvects(), reusing the transpose of the
# primitive-variable matrix A from the previous section.
L = Bs.eigenvects()
l = []
laml = []
# Unpack (eigenvalue, multiplicity, eigenvectors) tuples; store each left
# eigenvector as a row vector.
for (ev, _, ltmp) in L:
    l.append(ltmp[0].transpose())
    laml.append(ev)
"""
Explanation: left eigenvectors
End of explanation
"""
for n in range(len(l)):
if lam[n] == laml[n]:
ltmp = l[n]
p = ltmp.dot(r[n])
l[n] = ltmp/p
simplify(l[0])
l[1]
l[2]
"""
Explanation: normalization
End of explanation
"""
rho, u, v, c = symbols('rho u v c')
A = Matrix([[u, rho, 0, 0], [0, u, 0, rho**-1], [0,0, u, 0], [0, c**2 * rho, 0, u]])
A
A.eigenvals()
R = A.eigenvects() # this returns a tuple for each eigenvector with multiplicity -- unpack it
r = []
lam = []
for (ev, _, rtmp) in R:
for rv in rtmp:
r.append(rv)
lam.append(ev)
# we can normalize them anyway we want, so let's make the first entry 1
for n in range(len(r)):
v = r[n]
if not v[0] == 0:
r[n] = v/v[0]
r[0], lam[0]
r[1], lam[1]
r[2], lam[2]
r[3], lam[3]
"""
Explanation: 2-d system
End of explanation
"""
|
ddemidov/mba | python/example.ipynb | mit | cmin = [0.0, 0.0]
cmax = [1.0, 1.0]
coo = uniform(0, 1, (7,2))
val = uniform(0, 1, coo.shape[0])
"""
Explanation: Using MBA
cmin and cmax are coordinates of the bottom-left and the top-right corners of the bounding box containing scattered data. coo and val are arrays containing coordinates and values of the data points.
End of explanation
"""
n = 100
s = linspace(0,1,n)
x = array(meshgrid(s,s)).transpose([1,2,0]).copy()
"""
Explanation: Create $n \times n$ regular grid of coordinates to interpolate onto.
End of explanation
"""
def plot_surface(m0):
    """Interpolate the scattered data with an m0 x m0 initial grid and plot it.

    Uses the module-level cmin/cmax/coo/val/x/s defined in earlier cells.
    """
    # Build the multilevel B-spline approximation.
    interp = mba2(cmin, cmax, [m0,m0], coo, val)
    # Relative max-norm error at the original scatter points.
    error = amax(abs(val - interp(coo))) / amax(abs(val))
    # Evaluate on the regular grid and overlay the scatter points.
    v = interp(x)
    pcolormesh(s, s, v, cmap='RdBu')
    scatter(x=coo[:,0], y=coo[:,1], c=val, cmap='RdBu')
    xlim([0,1])
    ylim([0,1])
    title("$m_0 = {0:}$, error = {1:.3e}".format(m0, error))
    colorbar();
"""
Explanation: The plot_surface() function constructs MBA class with the given initial grid size, interpolates the input data over regular surface, and plots the results
End of explanation
"""
figure(figsize=(11,5))
subplot(121); plot_surface(2)
subplot(122); plot_surface(10)
tight_layout()
"""
Explanation: The smaller the initial grid size, the smoother the interpolated surface.
End of explanation
"""
%%timeit
interp = mba2(cmin, cmax, [3,3], coo, val)
%%timeit interp = mba2(cmin, cmax, [3,3], coo, val)
v = interp(x)
interp = mba2(cmin, cmax, [3,3], coo, val)
print(interp)
"""
Explanation: Report some timings and statistics about the constructed hierarchy:
End of explanation
"""
def test_initial(x0, y0, init, desc):
    """Fit a 1-D MBA to (x0, y0) with initial approximation `init` and plot it.

    x0, y0: data point coordinates and values
    init:   callable giving the initial approximation; MBA fits the residual
    desc:   plot title
    """
    interp = mba1([0], [1], [8], x0, y0, init)
    # Dense evaluation grid as a column vector (mba1 expects 2-D input).
    x = linspace(0, 1, 100).reshape(100,1)
    y = interp(x)
    plot(x, y, 'k-')
    # The comprehension variable shadows the grid only inside the comprehension.
    plot(x, [init(x) for x in x], 'k:')
    plot(x0, y0, 'ro')
    ylim([0,1])
    title(desc)
x = [[0.3], [0.5], [0.7]]
v = [0.45, 0.55, 0.5, ]
figure(figsize=(12, 3))
subplot(131); test_initial(x, v, lambda x: 0.5, 'y = 0.5')
subplot(132); test_initial(x, v, lambda x: x[0], 'y = x')
subplot(133); test_initial(x, v, lambda x: 1-x[0], 'y = 1-x')
tight_layout()
"""
Explanation: Specifing the initial approximation
By default MBA uses linear approximation as an initial guess. Multilevel B-splines then are used to fit the difference between initial approximation and the actual data. Sometimes it may useful to provide an initial approximation that would better fit the underlying model. Here is a simple example demonstrating this:
End of explanation
"""
|
bmcmenamin/fa_kit | examples/Tutorial_Episode0.ipynb | mit | import os
import sys
sys.path.append(os.path.pardir)
%matplotlib inline
import numpy as np
from fa_kit import FactorAnalysis
from fa_kit import plotting as fa_plotting
"""
Explanation: Tutorial Episode 0: Setting up a Factor Analysis pipeline
In this notebook, I show you how to set up a factor analysis pipeline and demonstrate the different methods used for calculating the number of factors to retain.
End of explanation
"""
def make_random_data(n_samp=10000, n_feat=100):
    """
    Build a random (n_samp, n_feat) matrix whose features come in
    overlapping bands of 10 adjacent columns sharing a common signal.
    """
    data = np.random.randn(n_samp, n_feat)
    BAND_WIDTH = 10
    BAND_OVERLAP = 2
    # Successive bands start BAND_WIDTH - BAND_OVERLAP columns apart, so
    # each band shares its last BAND_OVERLAP columns with the next one.
    for start in range(0, data.shape[1], BAND_WIDTH - BAND_OVERLAP):
        band_signal = 0.3*np.random.randn(n_samp, 1)
        data[:, start:(start + BAND_WIDTH)] += band_signal
    return data
data = make_random_data()
"""
Explanation: Synthesizing fake data
This function will generate samples of fake data and store it in the n_samples-by-n_features matrix data.
The data is constructed so that there's structure in the data for the factor analysis to reveal. Specifically, there are 'bands' of 10 adjacent dimensions that are positively correlated with one another. And just to spice things up a bit more, each of these bands is slightly overlapped with it's neighbor.
End of explanation
"""
def run_pipeline(data, retain_method='broken_stick',
                 rotation_method='varimax', **kwargs):
    """Run the full factor-analysis pipeline on `data`; return a summary figure.

    data:            n_samples x n_features input array
    retain_method:   one of 'top_n', 'top_pct', 'kaiser', 'broken_stick'
    rotation_method: 'varimax' or 'quartimax'
    kwargs:          forwarded to find_comps_to_retain (e.g. num_keep, pct_keep)
    """
    # Set up the factor analysis object, indicate how to calculate the
    # correlation matrix out of this input data.
    fa = FactorAnalysis.load_data_samples(
        data,
        preproc_demean=True,
        preproc_scale=True
    )
    # Extract the components
    fa.extract_components()
    # Calculate how many components to retain
    # You can use any of these methods:
    # 'top_n', 'top_pct', 'kaiser', 'broken_stick'
    fa.find_comps_to_retain(
        method=retain_method,
        **kwargs
    )
    # Once you know how many to retain, re-extract with PAF
    fa.reextract_using_paf()
    # Apply factor rotation
    # Right now there are both 'varimax' and 'quartimax'
    fa.rotate_components(
        method=rotation_method
    )
    # Plot summary figure
    fig_summary = fa_plotting.graph_summary(fa)
    return fig_summary
"""
Explanation: Setting up a factor analysis pipeline
The function run_pipeline will take a set of data and run through each of the steps in a factor analysis:
* Import the data and turn it into a n_features-by-n_features correlation matrix
* Extract the components using eigendecomposition
* Determine how many components to retain using one of the following methods:
* Retain Top N comps
* Retain Fixed percentage of variance
* Kaiser's criterion
* Comparison with broken-stick distribution
* Re-extract the k retained components using Principle Axis Factoring (PAF)
* Apply factor rotation using one these methods:
* Varimax
* Quartimax
* Make a pretty picture to summarize the results
End of explanation
"""
fig_topn = run_pipeline(data, retain_method='top_n', num_keep=5)
"""
Explanation: Demo: Top N retention
Below, we see the results when we run the pipeline while using the 'keep the top 5 components' retention method. The top panel in the graph is a scree-plot that shows us how the first 30 eigenvalues are distributed, and puts a dotted line where we set our top-5 cutoff. Looking at the graph, we should probably extract more than 5 components -- it looks like there's a natural dropoff at 12.
The middle panel shows us the five un-rotated components, and the lower panel shows us the five retained components after applying PAF and factor rotation. Even though we've under-extracted the number of components we need, there's still some clear structure with adjacent features loading onto the same components.
End of explanation
"""
fig_toppct = run_pipeline(data, retain_method='top_pct', pct_keep=0.2)
"""
Explanation: Demo: Top Percentage retention
Up next, we run the analysis using top_pct to keep however many components we need to capture a certain proportion of the overall covariance. We've set our cutoff to retain the top 20% of the overall signal.
The top panel has changed and now shows the cumulative proportion of variance explained and we have our cutoff placed at the 20% mark. In this view, it's harder to see the natural cutoff that occurs after the 12th component, but it's still there. And our 20% retention criteria almost gets there and decides to keep 11 components. The lower two panels are the same as the previous figure, but now with more components the banded structure of the input becomes more apparent.
End of explanation
"""
fig_kaiser = run_pipeline(data, retain_method='kaiser')
"""
Explanation: Demo: Kaiser's criterion
Now we use Kaiser's criterion to deteremine the number to retain. This criterion is inferred automatically from the dimension of the input data, but as you can see it really over-extracts and the factors look to be split up and noisy.
End of explanation
"""
fig_bs = run_pipeline(data, retain_method='broken_stick', rotation_method='varimax')
"""
Explanation: Demo: Best-fit Broken-stick
The distribution of eigenvalues in a covariance/correlation matrix often follow what appears to be a broken-stick distribution. Anecdotally, I've had a lot of luck with this method but often found that the broken stick didn't quite line up the way you'd expect. So, I've cooked up this variant that does a best-fit of a broken stick to the eigenvalue distribution. There's a little bit of magic in how it works to make sure that the fit isn't overly sensitive to the values in the first couple of components (because those are the ones that we expect to deviate from the broken stick and capture structure/signal).
Another benefit of this modified broken stick is that the method can be used with other types of n_features-by-n_features feature similariy matrices, such as co-occurance matrices, mutual information scores, etc.
However, as you'll see in Episode 2, it doesn't always do what you want. It's still a work in progress, but overall I'm really happy with how well it does as a fully-automatic method for calculating the number of features to retain.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_epochs_spectra.ipynb | bsd-3-clause | # Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
"""
Explanation: Compute the power spectral density of epochs
This script shows how to compute the power spectral density (PSD)
of measurements on epochs. It also shows how to plot its spatial
distribution.
End of explanation
"""
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
tmin, tmax, event_id = -1., 1., 1
raw.info['bads'] += ['MEG 2443'] # bads
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
proj=True, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# Let's first check out all channel types by averaging across epochs.
epochs.plot_psd(fmin=2, fmax=200)
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=False,
stim=False, exclude='bads')
# Now let's take a look at the spatial distributions of the psd.
epochs.plot_psd_topomap(ch_type='grad', normalize=True)
"""
Explanation: Set parameters
End of explanation
"""
|
sys-bio/tellurium | examples/notebooks/widgets/widgets_lorenz.ipynb | apache-2.0 | %matplotlib inline
from ipywidgets import interact, interactive
from IPython.display import clear_output, display, HTML
import numpy as np
from scipy import integrate
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
from matplotlib import animation
"""
Explanation: Exploring the Lorenz System of Differential Equations
In this Notebook we explore the Lorenz system of differential equations:
$$
\begin{aligned}
\dot{x} & = \sigma(y-x) \
\dot{y} & = \rho x - y - xz \
\dot{z} & = -\beta z + xy
\end{aligned}
$$
This is one of the classic systems in non-linear differential equations. It exhibits a range of different behaviors as the parameters ($\sigma$, $\beta$, $\rho$) are varied.
Imports
First, we import the needed things from IPython, NumPy, Matplotlib and SciPy.
End of explanation
"""
def solve_lorenz(N=10, angle=0.0, max_time=4.0, sigma=10.0, beta=8./3, rho=28.0):
    """Integrate N Lorenz trajectories and render them in a 3-D plot.

    N:        number of random initial conditions
    angle:    azimuthal viewing angle (degrees)
    max_time: integration horizon
    sigma, beta, rho: Lorenz system parameters
    Returns (t, x_t): time grid and an (N, len(t), 3) trajectory array.
    """
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1], projection='3d')
    ax.axis('off')
    # prepare the axes limits
    ax.set_xlim((-25, 25))
    ax.set_ylim((-35, 35))
    ax.set_zlim((5, 55))
    def lorenz_deriv(x_y_z, t0, sigma=sigma, beta=beta, rho=rho):
        """Compute the time-derivative of a Lorenz system."""
        x, y, z = x_y_z
        return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
    # Choose random starting points, uniformly distributed from -15 to 15
    # (fixed seed so the figure is reproducible across runs).
    np.random.seed(1)
    x0 = -15 + 30 * np.random.random((N, 3))
    # Solve for the trajectories (250 samples per unit time).
    t = np.linspace(0, max_time, int(250*max_time))
    x_t = np.asarray([integrate.odeint(lorenz_deriv, x0i, t)
                      for x0i in x0])
    # choose a different color for each trajectory
    colors = plt.cm.jet(np.linspace(0, 1, N))
    for i in range(N):
        x, y, z = x_t[i,:,:].T
        lines = ax.plot(x, y, z, '-', c=colors[i])
        plt.setp(lines, linewidth=2)
    ax.view_init(30, angle)
    plt.show()
    return t, x_t
"""
Explanation: Computing the trajectories and plotting the result
We define a function that can integrate the differential equations numerically and then plot the solutions. This function has arguments that control the parameters of the differential equation ($\sigma$, $\beta$, $\rho$), the numerical integration (N, max_time) and the visualization (angle).
End of explanation
"""
t, x_t = solve_lorenz(angle=0, N=10)
"""
Explanation: Let's call the function once to view the solutions. For this set of parameters, we see the trajectories swirling around two points, called attractors.
End of explanation
"""
w = interactive(solve_lorenz, angle=(0.,360.), N=(0,50), sigma=(0.0,50.0), rho=(0.0,50.0))
display(w)
"""
Explanation: Using IPython's interactive function, we can explore how the trajectories behave as we change the various parameters.
End of explanation
"""
t, x_t = w.result
w.kwargs
"""
Explanation: The object returned by interactive is a Widget object and it has attributes that contain the current result and arguments:
End of explanation
"""
xyz_avg = x_t.mean(axis=1)
xyz_avg.shape
"""
Explanation: After interacting with the system, we can take the result and perform further computations. In this case, we compute the average positions in $x$, $y$ and $z$.
End of explanation
"""
plt.hist(xyz_avg[:,0])
plt.title('Average $x(t)$')
plt.hist(xyz_avg[:,1])
plt.title('Average $y(t)$')
"""
Explanation: Creating histograms of the average positions (across different trajectories) show that on average the trajectories swirl about the attractors.
End of explanation
"""
|
hektor-monteiro/python-notebooks | MCMC-exemplo.ipynb | gpl-2.0 | import numpy as np
import matplotlib.pyplot as plt
xobs = np.array([1.,2.1,3.4,5.6,8.3,9.1,10.7,13.0])
yobs = np.array([6.24724,4.78879,8.82746,15.6056,16.2351,31.5331,8.88331,31.3041])
yobs_er = np.array([0.74,2.91,1.47,1.90,2.86,5.83,6.01,5.31]) # 30% error
plt.errorbar(xobs,yobs, yobs_er, fmt='o', capsize=5)
# ajustando com funções python
z = np.polyfit(xobs, yobs, 1, w=1./yobs_er)
ajuste = np.poly1d(z)
plt.plot(xobs,ajuste(xobs))
print(ajuste)
"""
Explanation: Um exemplo de MCMC
Vamos ver na pratica a criação de um "amostrador" resolvendo o problema de ajustar uma função a um conjunto de dados. Neste exemplo simplificado vamos tentar ajustar uma reta aos dados abaixo:
End of explanation
"""
def likelihood(x, y, yerr, a, b):
    """Likelihood of the straight-line model y = a*x + b with Gaussian errors.

    Args:
        x, y, yerr: observation arrays (same length).
        a, b: slope and intercept of the proposed line.

    Returns:
        The (unnormalized) Gaussian likelihood. A hard prior restricts
        the intercept to 3 <= b <= 5; outside that range a tiny value
        (1e-300) is returned so the sampler effectively rejects the point.
    """
    # Flat prior on the intercept: anything outside [3, 5] is "impossible".
    if b < 3. or b > 5.:
        return 1.e-300
    # Sum the squared residuals in the exponent instead of multiplying one
    # exponential per data point: mathematically identical, but far less
    # prone to floating-point underflow when there are many observations.
    chi2 = np.sum((y - (a * x + b)) ** 2 / yerr ** 2)
    return np.exp(-0.5 * chi2)
def proposal(a, b):
    """Proposal density term; the sampler draws uniformly, so this is always 1."""
    return 1.
"""
Explanation: Para resolver este problema precisamos definir algumas funções para simplificar a codificação. Para realizar a amostragem vamos usar o esquema básico de Metropolis–Hastings (https://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm). Neste esquema precisamos definir a função verosemelhança do problema e a função de probabilidade que será usada para as amostragens propostas.
End of explanation
"""
# Metropolis–Hastings sampler for the line parameters (a, b).
MCsize = 100000
# arrays to receive the generated chain steps
am = np.zeros(MCsize)
bm = np.zeros(MCsize)
# array to receive the computed likelihoods
# NOTE(review): `lik` is never filled below — confirm whether it is still needed.
lik = np.zeros(MCsize)
for i in range(MCsize):
    # independence sampler: draw both proposals from a uniform(0, 6) distribution
    am_prop = np.random.uniform(0,6)
    bm_prop = np.random.uniform(0,6)
    if (i ==0):
        # the first iteration just seeds the chain with the initial point
        am[i] = am_prop
        bm[i] = bm_prop
        continue
    # likelihood ratio of the proposed point vs. the current point
    a1 = likelihood(xobs, yobs, yobs_er, am_prop, bm_prop) / likelihood(xobs, yobs, yobs_er, am[i-1], bm[i-1])
    if (np.isfinite(a1) == False):
        a1 = 1
    # proposal-density ratio (uniform proposal, so this is always 1)
    a2 = proposal(am_prop, bm_prop) / proposal(am[i-1], bm[i-1])
    a = a1*a2
    if(a > 1.):
        # acceptance ratio above 1: always accept the proposal
        am[i] = am_prop
        bm[i] = bm_prop
    else:
        # otherwise accept the step at random with probability a
        prob = np.random.rand()
        if(prob < a):
            am[i] = am_prop
            bm[i] = bm_prop
        else:
            # rejected: repeat the previous point in the chain
            am[i] = am[i-1]
            bm[i] = bm[i-1]
    #print(a1,a2,a,am[i],bm[i])
# plot the chain traces and marginal histograms
plt.figure()
plt.plot(am,label='a')
plt.plot(bm,label='b')
plt.legend()
plt.figure()
plt.hist(am,bins='auto',label='a',alpha=0.5)
plt.hist(bm,bins='auto',label='b',alpha=0.5)
# posterior means and standard deviations of slope a and intercept b
print(np.mean(am),np.mean(bm))
print(np.std(am),np.std(bm))
likelihood(xobs, yobs, yobs_er, 1.9,4)
"""
Explanation: Com as funções definidas montamos o amostrador:
End of explanation
"""
|
chrsclrk/Solution_Architecture_with_Ansible_Jupyter | Solution_Architecture_with_Ansible_and_Jupyter.ipynb | mit | import sys, platform, subprocess
ansibleVersion = subprocess.check_output(['ansible', '--version']).decode('utf-8').split()[1]
print( f" Python: {' '.join(sys.version.split()[0:4])}\n" # Not the version of Pythone used by Ansible.
f' macOS: {platform.mac_ver()[0]}\n' # Control machine operatings sysrtem.
f"Ansible: {subprocess.check_output(['ansible', '--version']).decode('utf-8').split()[1]}")
"""
Explanation: Ansible: Getting Started in a Jupyter Notebook
A Jupyter Notebook to follow along while reading "Ansible: Up and Running", 2nd Edition,
Lorin Hochstein, Rene Moser
http://shop.oreilly.com/product/0636920065500.do
Publisher: O'Reilly Media, Inc. Release Date: August 2017 ISBN: 9781491979808
Remote machines of interest to the control machine:
<img src="w530_aur_components.png"</>
Ansible Control Machine: what is the execution context?
subprocess.check_output() from page 546
"Python Cookbook" 3rd Edition, David Beazley and Brian K. Jones, 2013,
O'Reily media, ISBN 978-1-4493-4037-7, http://www.dabeaz.com/cookbook.html
End of explanation
"""
! nl -b a /etc/hosts | sed -n '74,77p;84,86p' | cut -c 1-100
"""
Explanation: View of the network, private, from /etc/hosts
(/etc/hosts display truncated to avoid line wrap)
End of explanation
"""
%cd '/Users/chrsclrk/Google Drive/solutionArchitect/automation'
"""
Explanation: Review of control machine's Ansible configuration
Let the Jupyter notebook know where to find files.
End of explanation
"""
! echo "*** inventory.ini contents ***" ; nl -ba controlMachine/inventory.ini | sed -n '1,7p'
! echo "*** ansible.cfg contents ***" ; cat /Users/chrsclrk/.ansible.cfg
"""
Explanation: Note use of group "aur" to provide value for "become" password.
End of explanation
"""
!ansible aur --inventory=controlMachine/inventory.ini --module-name=ping
"""
Explanation: is the control machine able to reach the remote machines?
Ansible's module, ping, replies with the string "pong" when the control machine successfully connects with the host in the group aur.
End of explanation
"""
!ansible aur --inventory=controlMachine/inventory.ini --module-name=command --args=uptime
"""
Explanation: Here Ansible runs a program on the remote machines.
Connectivity is established.
The uptime of the remote machines may be of more interest than the string "pong".
End of explanation
"""
oneSetup = !ansible aur[0] --inventory=controlMachine/inventory.ini --module-name=setup
print(f'{len(oneSetup):>34} Metric of setup results; length of Jupyter reference to saved setup results.\n'
f'{type(oneSetup)} Type of Jupyter reference to save results.')
oneSetup # View facts from the first target machine.
"""
Explanation: Ansible's "setup" module; all it knows about a target machine
When Ansible's connects to a target machine it collects information as part of its setup.
The resulting data structure is collectively referred to as facts.
End of explanation
"""
!ansible aur --inventory=controlMachine/inventory.ini --module-name=setup --args='filter=ansible_default_ipv4'
"""
Explanation: Retrieve a subset of the facts.
From Loren Hochsetein's page
[https://github.com/lorin/ansible-quickref/blob/master/facts.rst]
End of explanation
"""
!ansible aur --inventory=controlMachine/inventory.ini --module-name=shell --args="date --rfc-3339=ns"
"""
Explanation: Report the target machines' date for rough idea of time synchrony.
From the control machine, Ansible concurrently accesses the target machines.
This example shows the three target machines are within a 100 milliseconds of each other.
End of explanation
"""
!ansible aur --inventory=controlMachine/inventory.ini --module-name=shell --args="/usr/sbin/ip -4 addr show eno16777728 | sed -n '2p' | cut -d ' ' -f 6"
"""
Explanation: Results from piping commands on the remote machines.
Three commands on the target host to yield an IPv4 address of a network device.
(CentOS uses the “Predictable Network Interface Names” convention.)
End of explanation
"""
! nl playbooks/ch04-98_facts_ip-mac.yaml
! ansible-playbook --verbose --inventory=controlMachine/inventory.ini --become playbooks/ch04-98_facts_ip-mac.yaml
"""
Explanation: Ansible Playbooks
One playbook with one task and three debug statments concerning the network adapater Ansible is using by default."
Ansible's debug module is used to
* print the IPv4 address
* print the MAC address
* print the IPv4 and MAC address togehter on one line
End of explanation
"""
|
tensorflow/hub | examples/colab/wav2vec2_saved_model_finetuning.ipynb | apache-2.0 | #@title Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Explanation: Copyright 2021 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
!pip3 install -q git+https://github.com/vasudevgupta7/gsoc-wav2vec2@main
!sudo apt-get install -y libsndfile1-dev
!pip3 install -q SoundFile
"""
Explanation: <table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/hub/tutorials/wav2vec2_saved_model_finetuning"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/wav2vec2_saved_model_finetuning.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/wav2vec2_saved_model_finetuning.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/wav2vec2_saved_model_finetuning.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
<td>
<a href="https://tfhub.dev/vasudevgupta7/wav2vec2/1"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a>
</td>
</table>
Fine-tuning Wav2Vec2 with an LM head
In this notebook, we will load the pre-trained wav2vec2 model from TFHub and will fine-tune it on LibriSpeech dataset by appending Language Modeling head (LM) over the top of our pre-trained model. The underlying task is to build a model for Automatic Speech Recognition i.e. given some speech, the model should be able to transcribe it into text.
Setting Up
Before running this notebook, please ensure that you are on GPU runtime (Runtime > Change runtime type > GPU). The following cell will install gsoc-wav2vec2 package & its dependencies.
End of explanation
"""
import os
import tensorflow as tf
import tensorflow_hub as hub
from wav2vec2 import Wav2Vec2Config
config = Wav2Vec2Config()
print("TF version:", tf.__version__)
"""
Explanation: Model setup using TFHub
We will start by importing some libraries/modules.
End of explanation
"""
pretrained_layer = hub.KerasLayer("https://tfhub.dev/vasudevgupta7/wav2vec2/1", trainable=True)
"""
Explanation: First, we will download our model from TFHub & will wrap our model signature with hub.KerasLayer to be able to use this model like any other Keras layer. Fortunately, hub.KerasLayer can do both in just 1 line.
Note: When loading model with hub.KerasLayer, model becomes a bit opaque but sometimes we need finer controls over the model, then we can load the model with tf.keras.models.load_model(...).
End of explanation
"""
AUDIO_MAXLEN = 246000
LABEL_MAXLEN = 256
BATCH_SIZE = 2
"""
Explanation: You can refer to this script in case you are interested in the model exporting script. Object pretrained_layer is the freezed version of Wav2Vec2Model. These pre-trained weights were converted from HuggingFace PyTorch pre-trained weights using this script.
Originally, wav2vec2 was pre-trained with a masked language modelling approach with the objective to identify the true quantized latent speech representation for a masked time step. You can read more about the training objective in the paper- wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations.
Now, we will be defining a few constants and hyper-parameters which will be useful in the next few cells. AUDIO_MAXLEN is intentionally set to 246000 as the model signature only accepts static sequence length of 246000.
End of explanation
"""
inputs = tf.keras.Input(shape=(AUDIO_MAXLEN,))
hidden_states = pretrained_layer(inputs)
outputs = tf.keras.layers.Dense(config.vocab_size)(hidden_states)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
"""
Explanation: In the following cell, we will wrap pretrained_layer & a dense layer (LM head) with the Keras's Functional API.
End of explanation
"""
model(tf.random.uniform(shape=(BATCH_SIZE, AUDIO_MAXLEN)))
model.summary()
"""
Explanation: The dense layer (defined above) is having an output dimension of vocab_size as we want to predict probabilities of each token in the vocabulary at each time step.
Setting up training state
In TensorFlow, model weights are built only when model.call or model.build is called for the first time, so the following cell will build the model weights for us. Further, we will be running model.summary() to check the total number of trainable parameters.
End of explanation
"""
from wav2vec2 import CTCLoss
LEARNING_RATE = 5e-5
loss_fn = CTCLoss(config, (BATCH_SIZE, AUDIO_MAXLEN), division_factor=BATCH_SIZE)
optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
"""
Explanation: Now, we need to define the loss_fn and optimizer to be able to train the model. The following cell will do that for us. We will be using the Adam optimizer for simplicity. CTCLoss is a common loss type that is used for tasks (like ASR) where input sub-parts can't be easily aligned with output sub-parts. You can read more about CTC-loss from this amazing blog post.
CTCLoss (from gsoc-wav2vec2 package) accepts 3 arguments: config, model_input_shape & division_factor. If division_factor=1, then loss will simply get summed, so pass division_factor accordingly to get mean over batch.
End of explanation
"""
!wget https://www.openslr.org/resources/12/dev-clean.tar.gz -P ./data/train/
!tar -xf ./data/train/dev-clean.tar.gz -C ./data/train/
"""
Explanation: Loading & Pre-processing data
Let's now download the LibriSpeech dataset from the official website and set it up.
End of explanation
"""
ls ./data/train/
"""
Explanation: Note: We are using dev-clean configuration as this notebook is just for demonstration purposes, so we need a small amount of data. Complete training data can be easily downloaded from LibriSpeech website.
End of explanation
"""
data_dir = "./data/train/LibriSpeech/dev-clean/2428/83705/"
all_files = os.listdir(data_dir)
flac_files = [f for f in all_files if f.endswith(".flac")]
txt_files = [f for f in all_files if f.endswith(".txt")]
print("Transcription files:", txt_files, "\nSound files:", flac_files)
"""
Explanation: Our dataset lies in the LibriSpeech directory. Let's explore these files.
End of explanation
"""
def read_txt_file(f):
    """Parse a LibriSpeech transcription file into {utterance_id: transcript}.

    Each line looks like "<utterance-id> WORD WORD ...". Lines with fewer
    than three whitespace-separated tokens are skipped.
    """
    with open(f, "r") as handle:
        lines = handle.read().split("\n")
    samples = {}
    for line in lines:
        tokens = line.split()
        if len(tokens) > 2:
            samples[tokens[0]] = " ".join(tokens[1:])
    return samples
"""
Explanation: Alright, so each sub-directory has many .flac files and a .txt file. The .txt file contains text transcriptions for all the speech samples (i.e. .flac files) present in that sub-directory.
We can load this text data as follows:
End of explanation
"""
import soundfile as sf
REQUIRED_SAMPLE_RATE = 16000
def read_flac_file(file_path):
    """Load one .flac speech sample as {file_id: audio_array}.

    Raises ValueError when the file is not sampled at REQUIRED_SAMPLE_RATE
    (16 kHz), the rate the pre-trained wav2vec2 model expects.
    """
    with open(file_path, "rb") as stream:
        audio, sample_rate = sf.read(stream)
    if sample_rate != REQUIRED_SAMPLE_RATE:
        raise ValueError(
            f"sample rate (={sample_rate}) of your files must be {REQUIRED_SAMPLE_RATE}"
        )
    # Key by the bare utterance id: basename with the ".flac" suffix removed.
    file_id = os.path.basename(file_path)[:-len(".flac")]
    return {file_id: audio}
"""
Explanation: Similarly, we will define a function for loading a speech sample from a .flac file.
REQUIRED_SAMPLE_RATE is set to 16000 as wav2vec2 was pre-trained with 16K frequency and it's recommended to fine-tune it without any major change in data distribution due to frequency.
End of explanation
"""
from IPython.display import Audio
import random
file_id = random.choice([f[:-len(".flac")] for f in flac_files])
flac_file_path, txt_file_path = os.path.join(data_dir, f"{file_id}.flac"), os.path.join(data_dir, "2428-83705.trans.txt")
print("Text Transcription:", read_txt_file(txt_file_path)[file_id], "\nAudio:")
Audio(filename=flac_file_path)
"""
Explanation: Now, we will pick some random samples & will try to visualize them.
End of explanation
"""
def fetch_sound_text_mapping(data_dir):
    """Pair every speech sample in data_dir with its text transcription.

    Returns a list of (audio_array, transcript) tuples, keeping only the
    samples shorter than AUDIO_MAXLEN frames (the model's fixed input size).
    """
    entries = [os.path.join(data_dir, name) for name in os.listdir(data_dir)]

    # Merge all transcription files into one {utterance_id: text} mapping.
    txt_samples = {}
    for path in entries:
        if path.endswith(".txt"):
            txt_samples.update(read_txt_file(path))

    # Load every audio file into one {utterance_id: waveform} mapping.
    speech_samples = {}
    for path in entries:
        if path.endswith(".flac"):
            speech_samples.update(read_flac_file(path))

    assert len(txt_samples) == len(speech_samples)

    return [
        (speech_samples[file_id], txt_samples[file_id])
        for file_id in speech_samples
        if len(speech_samples[file_id]) < AUDIO_MAXLEN
    ]
"""
Explanation: Now, we will combine all the speech & text samples and will define the function (in next cell) for that purpose.
End of explanation
"""
samples = fetch_sound_text_mapping(data_dir)
samples[:5]
"""
Explanation: It's time to have a look at a few samples ...
End of explanation
"""
from wav2vec2 import Wav2Vec2Processor
tokenizer = Wav2Vec2Processor(is_tokenizer=True)
processor = Wav2Vec2Processor(is_tokenizer=False)
def preprocess_text(text):
    """Tokenize a transcript string into an int32 tensor of label ids."""
    label = tokenizer(text)
    return tf.constant(label, dtype=tf.int32)
def preprocess_speech(audio):
    """Normalize a raw waveform with the wav2vec2 processor (frame-wise)."""
    audio = tf.constant(audio, dtype=tf.float32)
    return processor(tf.transpose(audio))
"""
Explanation: Note: We are loading this data into memory as we working with a small amount of dataset in this notebook. But for training on the complete dataset (~300 GBs), you will have to load data lazily. You can refer to this script to know more on that.
Let's pre-process the data now !!!
We will first define the tokenizer & processor using gsoc-wav2vec2 package. Then, we will do very simple pre-processing. processor will normalize raw speech w.r.to frames axis and tokenizer will convert our model outputs into the string (using the defined vocabulary) & will take care of the removal of special tokens (depending on your tokenizer configuration).
End of explanation
"""
def inputs_generator():
    """Yield (normalized_speech, label_ids) pairs from the in-memory samples."""
    for speech, text in samples:
        yield preprocess_speech(speech), preprocess_text(text)
"""
Explanation: Now, we will define the python generator to call the preprocessing functions we defined in above cells.
End of explanation
"""
output_signature = (
tf.TensorSpec(shape=(None), dtype=tf.float32),
tf.TensorSpec(shape=(None), dtype=tf.int32),
)
dataset = tf.data.Dataset.from_generator(inputs_generator, output_signature=output_signature)
BUFFER_SIZE = len(flac_files)
SEED = 42
dataset = dataset.shuffle(BUFFER_SIZE, seed=SEED)
"""
Explanation: Setting up tf.data.Dataset
Following cell will setup tf.data.Dataset object using its .from_generator(...) method. We will be using the generator object, we defined in the above cell.
Note: For distributed training (especially on TPUs), .from_generator(...) doesn't work currently and it is recommended to train on data stored in .tfrecord format (Note: The TFRecords should ideally be stored inside a GCS Bucket in order for the TPUs to work to the fullest extent).
You can refer to this script for more details on how to convert LibriSpeech data into tfrecords.
End of explanation
"""
dataset = dataset.padded_batch(BATCH_SIZE, padded_shapes=(AUDIO_MAXLEN, LABEL_MAXLEN), padding_values=(0.0, 0))
"""
Explanation: We will pass the dataset into multiple batches, so let's prepare batches in the following cell. Now, all the sequences in a batch should be padded to a constant length. We will use the.padded_batch(...) method for that purpose.
End of explanation
"""
dataset = dataset.prefetch(tf.data.AUTOTUNE)
"""
Explanation: Accelerators (like GPUs/TPUs) are very fast and often data-loading (& pre-processing) becomes the bottleneck during training as the data-loading part happens on CPUs. This can increase the training time significantly especially when there is a lot of online pre-processing involved or data is streamed online from GCS buckets. To handle those issues, tf.data.Dataset offers the .prefetch(...) method. This method helps in preparing the next few batches in parallel (on CPUs) while the model is making predictions (on GPUs/TPUs) on the current batch.
End of explanation
"""
num_train_batches = 10
num_val_batches = 4
train_dataset = dataset.take(num_train_batches)
val_dataset = dataset.skip(num_train_batches).take(num_val_batches)
"""
Explanation: Since this notebook is made for demonstration purposes, we will be taking first num_train_batches and will perform training over only that. You are encouraged to train on the whole dataset though. Similarly, we will evaluate only num_val_batches.
End of explanation
"""
model.compile(optimizer, loss=loss_fn)
"""
Explanation: Model training
For training our model, we will be directly calling .fit(...) method after compiling our model with .compile(...).
End of explanation
"""
history = model.fit(train_dataset, validation_data=val_dataset, epochs=3)
history.history
"""
Explanation: The above cell will set up our training state. Now we can initiate training with the .fit(...) method.
End of explanation
"""
save_dir = "finetuned-wav2vec2"
model.save(save_dir, include_optimizer=False)
"""
Explanation: Let's save our model with .save(...) method to be able to perform inference later. You can also export this SavedModel to TFHub by following TFHub documentation.
End of explanation
"""
!pip3 install -q datasets
from datasets import load_metric
metric = load_metric("wer")
@tf.function(jit_compile=True)
def eval_fwd(batch):
    """XLA-compiled greedy decode: run the model and take the argmax token per time step."""
    logits = model(batch, training=False)
    return tf.argmax(logits, axis=-1)
"""
Explanation: Note: We are setting include_optimizer=False as we want to use this model for inference only.
Evaluation
Now we will be computing Word Error Rate over the validation dataset
Word error rate (WER) is a common metric for measuring the performance of an automatic speech recognition system. The WER is derived from the Levenshtein distance, working at the word level. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the percentage of words that were incorrectly predicted.
You can refer to this paper to learn more about WER.
We will use load_metric(...) function from HuggingFace datasets library. Let's first install the datasets library using pip and then define the metric object.
End of explanation
"""
from tqdm.auto import tqdm
for speech, labels in tqdm(val_dataset, total=num_val_batches):
predictions = eval_fwd(speech)
predictions = [tokenizer.decode(pred) for pred in predictions.numpy().tolist()]
references = [tokenizer.decode(label, group_tokens=False) for label in labels.numpy().tolist()]
metric.add_batch(references=references, predictions=predictions)
"""
Explanation: It's time to run the evaluation on validation data now.
End of explanation
"""
metric.compute()
"""
Explanation: We are using the tokenizer.decode(...) method for decoding our predictions and labels back into the text and will add them to the metric for WER computation later.
Now, let's calculate the metric value in following cell:
End of explanation
"""
finetuned_model = tf.keras.models.load_model(save_dir)
"""
Explanation: Note: Here metric value doesn't make any sense as the model is trained on very small data and ASR-like tasks often require a large amount of data to learn a mapping from speech to text. You should probably train on large data to get some good results. This notebook gives you a template to fine-tune a pre-trained speech model.
Inference
Now that we are satisfied with the training process & have saved the model in save_dir, we will see how this model can be used for inference.
First, we will load our model using tf.keras.models.load_model(...).
End of explanation
"""
!wget https://github.com/vasudevgupta7/gsoc-wav2vec2/raw/main/data/SA2.wav
"""
Explanation: Let's download some speech samples for performing inference. You can replace the following sample with your speech sample also.
End of explanation
"""
import numpy as np
speech, _ = sf.read("SA2.wav")
speech = np.pad(speech, (0, AUDIO_MAXLEN - len(speech)))
speech = tf.expand_dims(processor(tf.constant(speech)), 0)
outputs = finetuned_model(speech)
outputs
"""
Explanation: Now, we will read the speech sample using soundfile.read(...) and pad it to AUDIO_MAXLEN to satisfy the model signature. Then we will normalize that speech sample using the Wav2Vec2Processor instance & will feed it into the model.
End of explanation
"""
predictions = tf.argmax(outputs, axis=-1)
predictions = [tokenizer.decode(pred) for pred in predictions.numpy().tolist()]
predictions
"""
Explanation: Let's decode numbers back into text sequence using the Wav2Vec2tokenizer instance, we defined above.
End of explanation
"""
|
swirlingsand/deep-learning-foundations | sentiment-network/Sentiment Classification - Mini Project 2.ipynb | mit | def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
# Load the raw reviews; x[:-1] strips the trailing newline from each line.
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
# Load the matching sentiment labels, upper-cased (POSITIVE / NEGATIVE).
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
# Quick sanity checks (the notebook cells echo these values).
len(reviews)
reviews[0]
labels[0]
"""
Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network
by Andrew Trask
Twitter: @iamtrask
Blog: http://iamtrask.github.io
What You Should Already Know
neural networks, forward and back-propagation
stochastic gradient descent
mean squared error
and train/test splits
Where to Get Help if You Need it
Re-watch previous Udacity Lectures
Leverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17)
Shoot me a tweet @iamtrask
Tutorial Outline:
Intro: The Importance of "Framing a Problem"
Curate a Dataset
Developing a "Predictive Theory"
PROJECT 1: Quick Theory Validation
Transforming Text to Numbers
PROJECT 2: Creating the Input/Output Data
Putting it all together in a Neural Network
PROJECT 3: Building our Neural Network
Understanding Neural Noise
PROJECT 4: Making Learning Faster by Reducing Noise
Analyzing Inefficiencies in our Network
PROJECT 5: Making our Network Train and Run Faster
Further Noise Reduction
PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary
Analysis: What's going on in the weights?
Lesson: Curate a Dataset
End of explanation
"""
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
"""
Explanation: Lesson: Develop a Predictive Theory
End of explanation
"""
from collections import Counter
import numpy as np
# Word frequencies tallied separately for positive and negative reviews,
# plus an overall tally across the whole corpus.
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
    if(labels[i] == 'POSITIVE'):
        for word in reviews[i].split(" "):
            positive_counts[word] += 1
            total_counts[word] += 1
    else:
        for word in reviews[i].split(" "):
            negative_counts[word] += 1
            total_counts[word] += 1
positive_counts.most_common()
# Ratio of positive to negative usage for words seen more than 100 times;
# the +1 in the denominator avoids division by zero.
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
    if(cnt > 100):
        pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
        pos_neg_ratios[term] = pos_neg_ratio
# Log-transform the ratios so positive and negative sentiment are symmetric
# around 0 (the +0.01 guards against taking the log of a zero ratio).
for word,ratio in pos_neg_ratios.most_common():
    if(ratio > 1):
        pos_neg_ratios[word] = np.log(ratio)
    else:
        pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
"""
Explanation: Project 1: Quick Theory Validation
End of explanation
"""
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
vocab = set(total_counts.keys())
# print(vocab)
vocab_size = len(vocab)
print(vocab_size)
import numpy as np
layer_0 = np.zeros((1, vocab_size))
layer_0
word2index = {}
for i, word in enumerate(vocab):
word2index[word] = i
word2index
def update_input_layer(review):
    """ Modify the global layer_0 to represent the vector form of review.
    The element at a given index of layer_0 should represent \
    how many times the given word occurs in the review.
    Args:
        review(string) - the string of the review
    Returns:
        None
    """
    global layer_0

    # Clear out previous state: reset the layer to be all 0s.
    layer_0 *= 0

    # Count each word of the review at its vocabulary index.
    for word in review.split(" "):
        layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
reviews[0]
layer_0
def get_target_for_label(label):
    """Convert a label to `0` or `1`.
    Args:
        label(string) - Either "POSITIVE" or "NEGATIVE".
    Returns:
        `1` for "POSITIVE", `0` for "NEGATIVE".
    Raises:
        ValueError: if label is neither value. (Previously an error *string*
        was returned, silently breaking the documented int return type.)
    """
    if label == "POSITIVE":
        return 1
    if label == "NEGATIVE":
        return 0
    raise ValueError(f"label must be 'POSITIVE' or 'NEGATIVE', got {label!r}")
get_target_for_label("NEGATIVE")
print(get_target_for_label(labels[0]))
labels[0]
"""
Explanation: Transforming Text into Numbers
End of explanation
"""
|
opencobra/cobrapy | documentation_builder/deletions.ipynb | gpl-2.0 | import pandas
from time import time
from cobra.io import load_model
from cobra.flux_analysis import (
single_gene_deletion, single_reaction_deletion, double_gene_deletion,
double_reaction_deletion)
cobra_model = load_model("textbook")
ecoli_model = load_model("iJO1366")
"""
Explanation: Simulating Deletions
End of explanation
"""
print('complete model: ', cobra_model.optimize())
with cobra_model:
cobra_model.reactions.PFK.knock_out()
print('pfk knocked out: ', cobra_model.optimize())
"""
Explanation: Knocking out single genes and reactions
A commonly asked question when analyzing metabolic models is what will happen if a certain reaction was not allowed to have any flux at all. This can tested using cobrapy by
End of explanation
"""
print('complete model: ', cobra_model.optimize())
with cobra_model:
cobra_model.genes.b1723.knock_out()
print('pfkA knocked out: ', cobra_model.optimize())
cobra_model.genes.b3916.knock_out()
print('pfkB knocked out: ', cobra_model.optimize())
"""
Explanation: For evaluating genetic manipulation strategies, it is more interesting to examine what happens if given genes are knocked out, as doing so can affect no reactions in case of redundancy, or several reactions if the gene participates in more than one reaction.
End of explanation
"""
deletion_results = single_gene_deletion(cobra_model)
"""
Explanation: Single Deletions
Perform all single gene deletions on a model
End of explanation
"""
single_gene_deletion(cobra_model, cobra_model.genes[:20])
"""
Explanation: These can also be done for only a subset of genes
End of explanation
"""
single_reaction_deletion(cobra_model, cobra_model.reactions[:20])
"""
Explanation: This can also be done for reactions
End of explanation
"""
double_gene_deletion(
cobra_model, cobra_model.genes[-5:]).round(4)
"""
Explanation: Double Deletions
Double deletions run in a similar way.
End of explanation
"""
start = time() # start timer()
double_gene_deletion(
ecoli_model, ecoli_model.genes[:25], processes=2)
t1 = time() - start
print("Double gene deletions for 200 genes completed in "
"%.2f sec with 2 cores" % t1)
start = time() # start timer()
double_gene_deletion(
ecoli_model, ecoli_model.genes[:25], processes=1)
t2 = time() - start
print("Double gene deletions for 200 genes completed in "
"%.2f sec with 1 core" % t2)
print("Speedup of %.2fx" % (t2 / t1))
"""
Explanation: By default, the double deletion function will automatically use multiprocessing, splitting the task over up to 4 cores if they are available. The number of cores can be manually specified as well. Setting use of a single core will disable use of the multiprocessing library, which often aids debugging.
End of explanation
"""
double_reaction_deletion(
cobra_model, cobra_model.reactions[2:7]).round(4)
"""
Explanation: Double deletions can also be run for reactions.
End of explanation
"""
single = single_reaction_deletion(cobra_model)
double = double_reaction_deletion(cobra_model)
print(single.knockout["ATPM"])
print(double.knockout[{"ATPM", "TKT1"}])
"""
Explanation: Accessing individual deletion results
Note that the indices for deletions are python set objects. This is the appropriate type since the order of deletions does not matter. Deleting reaction 1 and reaction 2 will have the same effect as deleting reaction 2 and reaction 1.
To make it easier to access results, all DataFrames returned by COBRApy deletion functions have a knockout indexer that makes that a bit simpler. Each entry in the indexer is treated as a single deletion entry. So you need to pass sets for double deletions.
End of explanation
"""
atpm = cobra_model.reactions.ATPM
tkt1 = cobra_model.reactions.TKT1
pfk = cobra_model.reactions.PFK
print(single.knockout[atpm, tkt1, pfk])
print(double.knockout[{atpm, tkt1}, {atpm, pfk}, {atpm}])
"""
Explanation: This can be used to get several deletions at once and will also work for Reaction or Gene objects (depending on what you deleted) directly.
End of explanation
"""
|
kit-cel/wt | SC468/BIAWGN_Capacity.ipynb | gpl-2.0 | import numpy as np
import scipy.integrate as integrate
import matplotlib.pyplot as plt
"""
Explanation: Capacity of the Binary-Input AWGN (BI-AWGN) Channel
This code is provided as supplementary material of the OFC short course SC468
This code illustrates
* Calculating the capacity of the binary input AWGN channel using numerical integration
* Capacity as a function of $E_s/N_0$ and $E_b/N_0$
End of explanation
"""
def f_YgivenX(y, x, sigman):
    """Conditional pdf f_{Y|X}(y|x): Gaussian with mean x and std sigman."""
    coeff = 1.0 / (np.sqrt(2 * np.pi) * sigman)
    return coeff * np.exp(-0.5 * ((y - x) / sigman) ** 2)
"""
Explanation: Conditional pdf $f_{Y|X}(y|x)$ for a channel with noise variance (per dimension) $\sigma_n^2$. This is merely the Gaussian pdf with mean $x$ and variance $\sigma_n^2$
End of explanation
"""
def f_Y(y, sigman):
    """Marginal output pdf for equiprobable BPSK inputs x = +1 / -1."""
    plus = f_YgivenX(y, +1, sigman)
    minus = f_YgivenX(y, -1, sigman)
    return 0.5 * (plus + minus)
"""
Explanation: Output pdf $f_Y(y) = \frac12[f_{Y|X}(y|X=+1)+f_{Y|X}(y|X=-1)]$
End of explanation
"""
def integrand(y, sigman):
    """f_Y(y) * log2(f_Y(y)), with 0*log2(0) defined as 0.

    The small threshold guards against log2 of (near-)zero densities in the
    far tails of the integration range, which would otherwise produce NaN.
    """
    density = f_Y(y, sigman)
    return density * np.log2(density) if density >= 1e-20 else 0
"""
Explanation: This is the function we like to integrate, $f_Y(y)\cdot\log_2(f_Y(y))$. We need to take special care of the case when the input is 0, as we defined $0\cdot\log_2(0)=0$, which is usually treated as "nan"
End of explanation
"""
def C_BIAWGN(sigman):
    """Capacity of the BI-AWGN channel via numerical integration.

    C = h(Y) - h(Y|X): the first term is computed as -int f_Y log2 f_Y dy,
    the second is the differential entropy of the Gaussian noise,
    0.5*log2(2*pi*e*sigman^2).
    """
    log_density_integral, _ = integrate.quad(
        integrand, -np.inf, np.inf, args=(sigman,))
    noise_entropy = 0.5 * np.log2(2 * np.pi * np.e * sigman ** 2)
    return -log_density_integral - noise_entropy
"""
Explanation: Compute the capacity using numerical integration. We have
\begin{equation}
C_{\text{BI-AWGN}} = -\int_{-\infty}^\infty f_Y(y)\log_2(f_Y(y))\mathrm{d}y - \frac12\log_2(2\pi e\sigma_n^2)
\end{equation}
End of explanation
"""
# alternative method using Gauss-Hermite Quadrature (see https://en.wikipedia.org/wiki/Gauss%E2%80%93Hermite_quadrature)
# use 40 components to approximate the integral, should be sufficiently exact
x_GH, w_GH = np.polynomial.hermite.hermgauss(40)
print(w_GH)
def C_BIAWGN_GH(sigman):
    """BI-AWGN capacity via Gauss-Hermite quadrature.

    Uses the module-level nodes/weights (x_GH, w_GH) to approximate
    E[log2 f_Y(Y)] conditioned on each BPSK symbol, avoiding numerical
    integration entirely.
    """
    scale = np.sqrt(2) * sigman
    gh_sum = 0.0
    for symbol in (+1, -1):
        gh_sum += np.sum(
            w_GH * [np.log2(f_Y(scale * xi + symbol, sigman)) for xi in x_GH])
    # average over the two symbols and apply the 1/sqrt(pi) quadrature factor
    h_term = gh_sum / 2 / np.sqrt(np.pi)
    return -h_term - 0.5 * np.log2(2 * np.pi * np.exp(1) * sigman ** 2)
"""
Explanation: This is an alternative way of calculating the capacity by approximating the integral using the Gauss-Hermite Quadrature (https://en.wikipedia.org/wiki/Gauss%E2%80%93Hermite_quadrature). The Gauss-Hermite quadrature states that
\begin{equation}
\int_{-\infty}^\infty e^{-x^2}f(x)\mathrm{d}x \approx \sum_{i=1}^nw_if(x_i)
\end{equation}
where $w_i$ and $x_i$ are the respective weights and roots that are given by the Hermite polynomials.
We have to rearrange the integral $I = \int_{-\infty}^\infty f_Y(y)\log_2(f_Y(y))\mathrm{d}y$ a little bit to put it into a form suitable for the Gauss-Hermite quadrature
\begin{align}
I &= \frac{1}{2}\sum_{x\in{\pm 1}}\int_{-\infty}^\infty f_{Y|X}(y|X=x)\log_2(f_Y(y))\mathrm{d}y \
&= \frac{1}{2}\sum_{x\in{\pm 1}}\int_{-\infty}^\infty \frac{1}{\sqrt{2\pi}\sigma_n}e^{-\frac{(y-x)^2}{2\sigma_n^2}}\log_2(f_Y(y))\mathrm{d}y \
&\stackrel{(a)}{=} \frac{1}{2}\sum_{x\in{\pm 1}}\int_{-\infty}^\infty \frac{1}{\sqrt{\pi}}e^{-z^2}\log_2(f_Y(\sqrt{2}\sigma_n z + x))\mathrm{d}z \
&\approx \frac{1}{2\sqrt{\pi}}\sum_{x\in{\pm 1}} \sum_{i=1}^nw_i \log_2(f_Y(\sqrt{2}\sigma_n x_i + x))
\end{align}
where in $(a)$, we substitute $z = \frac{y-x}{\sqrt{2}\sigma}$
End of explanation
"""
esno_dB_range = np.linspace(-16,10,100)
# convert dB to linear
esno_lin_range = [10**(esno_db/10) for esno_db in esno_dB_range]
# compute sigma_n
sigman_range = [np.sqrt(1/2/esno_lin) for esno_lin in esno_lin_range]
capacity_BIAWGN = [C_BIAWGN(sigman) for sigman in sigman_range]
# capacity of the AWGN channel
capacity_AWGN = [0.5*np.log2(1+1/(sigman**2)) for sigman in sigman_range]
"""
Explanation: Compute the capacity for a range of of $E_s/N_0$ values (given in dB)
End of explanation
"""
fig = plt.figure(1,figsize=(15,7))
plt.subplot(121)
plt.plot(esno_dB_range, capacity_AWGN)
plt.plot(esno_dB_range, capacity_BIAWGN)
plt.xlim((-10,10))
plt.ylim((0,2))
plt.xlabel('$E_s/N_0$ (dB)',fontsize=16)
plt.ylabel('Capacity (bit/channel use)',fontsize=16)
plt.grid(True)
plt.legend(['AWGN','BI-AWGN'],fontsize=14)
# plot Eb/N0. Note that in this case, the rate used for calculating Eb/N0 is the capacity
# Eb/N0 = 1/r (Es/N0)
plt.subplot(122)
plt.plot(esno_dB_range - 10*np.log10(capacity_AWGN), capacity_AWGN)
plt.plot(esno_dB_range - 10*np.log10(capacity_BIAWGN), capacity_BIAWGN)
plt.xlim((-2,10))
plt.ylim((0,2))
plt.xlabel('$E_b/N_0$ (dB)',fontsize=16)
plt.ylabel('Capacity (bit/channel use)',fontsize=16)
plt.grid(True)
from scipy.stats import norm
# first compute the BSC error probability
# the Q function (1-CDF) is also often called survival function (sf)
delta_range = [norm.sf(1/sigman) for sigman in sigman_range]
capacity_BIAWGN_hard = [1+delta*np.log2(delta)+(1-delta)*np.log2(1-delta) for delta in delta_range]
fig = plt.figure(1,figsize=(15,7))
plt.subplot(121)
plt.plot(esno_dB_range, capacity_AWGN)
plt.plot(esno_dB_range, capacity_BIAWGN)
plt.plot(esno_dB_range, capacity_BIAWGN_hard)
plt.xlim((-10,10))
plt.ylim((0,2))
plt.xlabel('$E_s/N_0$ (dB)',fontsize=16)
plt.ylabel('Capacity (bit/channel use)',fontsize=16)
plt.grid(True)
plt.legend(['AWGN','BI-AWGN', 'Hard BI-AWGN'],fontsize=14)
# plot Eb/N0. Note that in this case, the rate used for calculating Eb/N0 is the capacity
# Eb/N0 = 1/r (Es/N0)
plt.subplot(122)
plt.plot(esno_dB_range - 10*np.log10(capacity_AWGN), capacity_AWGN)
plt.plot(esno_dB_range - 10*np.log10(capacity_BIAWGN), capacity_BIAWGN)
plt.plot(esno_dB_range - 10*np.log10(capacity_BIAWGN_hard), capacity_BIAWGN_hard)
plt.xlim((-2,10))
plt.ylim((0,2))
plt.xlabel('$E_b/N_0$ (dB)',fontsize=16)
plt.ylabel('Capacity (bit/channel use)',fontsize=16)
plt.grid(True)
W = 4
"""
Explanation: Plot the capacity curves as a function of $E_s/N_0$ (in dB) and $E_b/N_0$ (in dB). In order to calculate $E_b/N_0$, we recall from the lecture that
\begin{equation}
\frac{E_s}{N_0} = r\cdot \frac{E_b}{N_0}\qquad\Rightarrow\qquad\frac{E_b}{N_0} = \frac{1}{r}\cdot \frac{E_s}{N_0}
\end{equation}
Next, we know that the best rate that can be achieved is the capacity, i.e., $r=C$. Hence, we get $\frac{E_b}{N_0}=\frac{1}{C}\cdot\frac{E_s}{N_0}$. Converting to decibels yields
\begin{align}
\frac{E_b}{N_0}\bigg|_{\textrm{dB}} &= 10\cdot\log_{10}\left(\frac{1}{C}\cdot\frac{E_s}{N_0}\right) \
&= 10\cdot\log_{10}\left(\frac{1}{C}\right) + 10\cdot\log_{10}\left(\frac{E_s}{N_0}\right) \
&= \frac{E_s}{N_0}\bigg|_{\textrm{dB}} - 10\cdot\log_{10}(C)
\end{align}
End of explanation
"""
|
dswah/pyGAM | doc/source/notebooks/quick_start.ipynb | apache-2.0 | from pygam.datasets import wage
X, y = wage()
"""
Explanation: Quick Start
This quick start will show how to do the following:
Install everything needed to use pyGAM.
fit a regression model with custom terms
search for the best smoothing parameters
plot partial dependence functions
Install pyGAM
Pip
pip install pygam
Conda
pyGAM is on conda-forge, however this is typically less up-to-date:
conda install -c conda-forge pygam
Bleeding edge
You can install the bleeding edge from github using flit.
First clone the repo, cd into the main directory and do:
pip install flit
flit install
Get pandas and matplotlib
pip install pandas matplotlib
Fit a Model
Let's get to it. First we need some data:
End of explanation
"""
from pygam import LinearGAM, s, f
gam = LinearGAM(s(0) + s(1) + f(2)).fit(X, y)
"""
Explanation: Now let's import a GAM that's made for regression problems.
Let's fit a spline term to the first 2 features, and a factor term to the 3rd feature.
End of explanation
"""
gam.summary()
"""
Explanation: Let's take a look at the model fit:
End of explanation
"""
gam = LinearGAM(s(0, n_splines=5) + s(1) + f(2)).fit(X, y)
"""
Explanation: Even though we have 3 terms with a total of (20 + 20 + 5) = 45 free variables, the default smoothing penalty (lam=0.6) reduces the effective degrees of freedom to just ~25.
By default, the spline terms, s(...), use 20 basis functions. This is a good starting point. The rule of thumb is to use a fairly large amount of flexibility, and then let the smoothing penalty regularize the model.
However, we can always use our expert knowledge to add flexibility where it is needed, or remove basis functions, and make fitting easier:
End of explanation
"""
print(gam.lam)
"""
Explanation: Automatically tune the model
By default, spline terms, s() have a penalty on their 2nd derivative, which encourages the functions to be smoother, while factor terms, f() and linear terms l(), have a l2, ie ridge penalty, which encourages them to take on smaller values.
lam, short for $\lambda$, controls the strength of the regularization penalty on each term. Terms can have multiple penalties, and therefore multiple lam.
End of explanation
"""
import numpy as np
lam = np.logspace(-3, 5, 5)
lams = [lam] * 3
gam.gridsearch(X, y, lam=lams)
gam.summary()
"""
Explanation: Our model has 3 lam parameters, currently just one per term.
Let's perform a grid-search over multiple lam values to see if we can improve our model.
We will seek the model with the lowest generalized cross-validation (GCV) score.
Our search space is 3-dimensional, so we have to be conservative with the number of points we consider per dimension.
Let's try 5 values for each smoothing parameter, resulting in a total of 5*5*5 = 125 points in our grid.
End of explanation
"""
lams = np.random.rand(100, 3) # random points on [0, 1], with shape (100, 3)
lams = lams * 6 - 3 # shift values to -3, 3
lams = 10 ** lams # transforms values to 1e-3, 1e3
random_gam = LinearGAM(s(0) + s(1) + f(2)).gridsearch(X, y, lam=lams)
random_gam.summary()
"""
Explanation: This is quite a bit better. Even though the in-sample $R^2$ value is lower, we can expect our model to generalize better because the GCV error is lower.
We could be more rigorous by using a train/test split, and checking our model's error on the test set. We were also quite lazy and only tried 125 values in our hyperopt. We might find a better model if we spent more time searching across more points.
For high-dimensional search-spaces, it is sometimes a good idea to try a randomized search.
We can achieve this by using numpy's random module:
End of explanation
"""
gam.statistics_['GCV'] < random_gam.statistics_['GCV']
"""
Explanation: In this case, our deterministic search found a better model:
End of explanation
"""
list(gam.statistics_.keys())
"""
Explanation: The statistics_ attribute is populated after the model has been fitted.
There are lots of interesting model statistics to check out, although many are automatically reported in the model summary:
End of explanation
"""
import matplotlib.pyplot as plt
for i, term in enumerate(gam.terms):
if term.isintercept:
continue
XX = gam.generate_X_grid(term=i)
pdep, confi = gam.partial_dependence(term=i, X=XX, width=0.95)
plt.figure()
plt.plot(XX[:, term.feature], pdep)
plt.plot(XX[:, term.feature], confi, c='r', ls='--')
plt.title(repr(term))
plt.show()
"""
Explanation: Partial Dependence Functions
One of the most attractive properties of GAMs is that we can decompose and inspect the contribution of each feature to the overall prediction.
This is done via partial dependence functions.
Let's plot the partial dependence for each term in our model, along with a 95% confidence interval for the estimated function.
End of explanation
"""
|
jlawman/jlawman.github.io | content/deep-learning/.ipynb_checkpoints/Activation Functions-Back-up-checkpoint.ipynb | mit | import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
z = np.linspace(-5,5,num=1000)
"""
Explanation: Deep learning activation functions examined below include ReLU, Leaky ReLU, Sigmoid, and tanh
End of explanation
"""
def draw_activation_plot(a, quadrants=2, y_ticks=(0,), y_lim=(0, 5)):
    """Plot activation values `a` against the module-level input grid `z`.

    Args:
        a: activation output evaluated on `z` (same length as `z`).
        quadrants: 2 draws only the upper half-plane; 4 also centers the
            x axis so all four quadrants are visible.
        y_ticks: tick positions for the y axis. Defaults are tuples to
            avoid the shared-mutable-default-argument pitfall.
        y_lim: (bottom, top) limits for the y axis.
    """
    # Create figure and axis
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    # Move the left axis to the center so negative z is visible
    ax.spines['left'].set_position('center')
    # Remove top and right axes
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    # Set x and y labels
    plt.xlabel('z')
    plt.ylabel('a')
    # Set ticks
    plt.xticks([])
    plt.yticks(y_ticks)
    # Set ylim
    plt.ylim(y_lim)
    # 4-quadrant configuration
    if quadrants == 4:
        # Move the bottom axis to the center as well
        ax.spines['bottom'].set_position('center')
        # Re-position the labels so they stay clear of the centered spines
        ax.yaxis.set_label_coords(.48, .75)
        ax.xaxis.set_label_coords(.75, .48)
    plt.plot(z, a);
"""
Explanation: Create plot drawing function
End of explanation
"""
relu = np.maximum(z,0)
draw_activation_plot(relu)
def draw_2_quad_plot(a):
    """Plot `a` versus the global grid `z` with a centered y axis
    (upper half-plane only)."""
    fig = plt.figure()
    axis = fig.add_subplot(1, 1, 1)
    # center the vertical spine; hide the top/right frame
    axis.spines['left'].set_position('center')
    axis.spines['right'].set_color('none')
    axis.spines['top'].set_color('none')
    plt.xlabel('z')
    plt.ylabel('a')
    plt.xticks([])
    plt.yticks([0])
    plt.ylim([0, 5])
    plt.plot(z, a);
relu = np.maximum(z,0)
draw_2_quad_plot(relu)
"""
Explanation: ReLU
Great default choice for hidden layers. It is frequently used in industry and is almost always adequete to solve a problem.
End of explanation
"""
leaky_ReLU = np.maximum(0.01 * z, z)
# Plot the leaky-ReLU curve. The previous revision mistakenly plotted `tanh`,
# which is not even defined until a later cell.
draw_4_quad_plot(leaky_ReLU)
"""
Explanation: Leaky ReLU
Can help by providing differentiable point at 0.
End of explanation
"""
def draw_4_quad_plot(a):
    """Plot `a` versus the global grid `z` with both axes centered
    (all four quadrants visible)."""
    fig = plt.figure()
    axis = fig.add_subplot(1, 1, 1)
    # center both spines; hide the top/right frame
    axis.spines['left'].set_position('center')
    axis.spines['bottom'].set_position('center')
    axis.spines['right'].set_color('none')
    axis.spines['top'].set_color('none')
    plt.xlabel('z')
    plt.ylabel('a')
    # keep the labels clear of the centered spines
    axis.yaxis.set_label_coords(.48, .75)
    axis.xaxis.set_label_coords(.75, .48)
    plt.xticks([])
    plt.yticks([-1, 0, 1])
    plt.plot(z, a);
tanh = (np.exp(z)-np.exp(-z))/(np.exp(z)+np.exp(-z))
draw_4_quad_plot(tanh)
"""
Explanation: tanh
Usually strictly better than sigmoid
End of explanation
"""
sigmoid = 1/(1+np.exp(-z))
draw_2_quad_plot(sigmoid)
#Create z and sigma
sigma = 1/(1+np.exp(-z))
#Draw prediction cut-off line
plt.axhline(0.5, color='black',ls='--')
#Label axis
plt.xlabel('z')
plt.ylabel(r'$\hat{y}$')
#Plot graph
plt.tick_params(axis='x',bottom='off',labelbottom='off')
plt.plot(z,sigma,'-',lw=3);
"""
Explanation: sigmoid
Almost never used except in output layer when dealing with binary classification.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/image_classification/solutions/5_fashion_mnist_class.ipynb | apache-2.0 | # TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
"""
Explanation: Train a Neural Network Model to Classify Images
Learning Objectives
Undersand how to read and display image data
Pre-process image data
Build, compile, and train a neural network model
Make and verify predictions
Introduction
This lab trains a neural network model to classify images of clothing, such as sneakers and shirts. You will learn how to read and display image data, pre-process image data, build, compile, and train a neural network model, and make and verify predictions
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
End of explanation
"""
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
"""
Explanation: Import the Fashion MNIST dataset
This lab uses the Fashion MNIST dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here:
<table>
<tr><td>
<img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
alt="Fashion MNIST sprite" width="600">
</td></tr>
<tr><td align="center">
<b>Figure 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>
</td></tr>
</table>
Fashion MNIST is intended as a drop-in replacement for the classic MNIST dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc.) in a format identical to that of the articles of clothing you'll use here.
This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code.
Here, 60,000 images are used to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow. Import and load the Fashion MNIST data directly from TensorFlow:
End of explanation
"""
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
"""
Explanation: Loading the dataset returns four NumPy arrays:
The train_images and train_labels arrays are the training set—the data the model uses to learn.
The model is tested against the test set, the test_images, and test_labels arrays.
The images are 28x28 NumPy arrays, with pixel values ranging from 0 to 255. The labels are an array of integers, ranging from 0 to 9. These correspond to the class of clothing the image represents:
<table>
<tr>
<th>Label</th>
<th>Class</th>
</tr>
<tr>
<td>0</td>
<td>T-shirt/top</td>
</tr>
<tr>
<td>1</td>
<td>Trouser</td>
</tr>
<tr>
<td>2</td>
<td>Pullover</td>
</tr>
<tr>
<td>3</td>
<td>Dress</td>
</tr>
<tr>
<td>4</td>
<td>Coat</td>
</tr>
<tr>
<td>5</td>
<td>Sandal</td>
</tr>
<tr>
<td>6</td>
<td>Shirt</td>
</tr>
<tr>
<td>7</td>
<td>Sneaker</td>
</tr>
<tr>
<td>8</td>
<td>Bag</td>
</tr>
<tr>
<td>9</td>
<td>Ankle boot</td>
</tr>
</table>
Each image is mapped to a single label. Since the class names are not included with the dataset, store them here to use later when plotting the images:
End of explanation
"""
train_images.shape
"""
Explanation: Explore the data
Let's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, with each image represented as 28 x 28 pixels:
End of explanation
"""
len(train_labels)
"""
Explanation: Likewise, there are 60,000 labels in the training set:
End of explanation
"""
train_labels
"""
Explanation: Each label is an integer between 0 and 9:
End of explanation
"""
test_images.shape
"""
Explanation: There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels:
End of explanation
"""
len(test_labels)
"""
Explanation: And the test set contains 10,000 images labels:
End of explanation
"""
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
"""
Explanation: Preprocess the data
The data must be preprocessed before training the network. If you inspect the first image in the training set, you will see that the pixel values fall in the range of 0 to 255:
End of explanation
"""
train_images = train_images / 255.0
test_images = test_images / 255.0
"""
Explanation: Scale these values to a range of 0 to 1 before feeding them to the neural network model. To do so, divide the values by 255. It's important that the training set and the testing set be preprocessed in the same way:
End of explanation
"""
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
"""
Explanation: To verify that the data is in the correct format and that you're ready to build and train the network, let's display the first 25 images from the training set and display the class name below each image.
End of explanation
"""
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
"""
Explanation: Build the model
Building the neural network requires configuring the layers of the model, then compiling the model.
Set up the layers
The basic building block of a neural network is the layer. Layers extract representations from the data fed into them. Hopefully, these representations are meaningful for the problem at hand.
Most of deep learning consists of chaining together simple layers. Most layers, such as tf.keras.layers.Dense, have parameters that are learned during training.
End of explanation
"""
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
"""
Explanation: The first layer in this network, tf.keras.layers.Flatten, transforms the format of the images from a two-dimensional array (of 28 by 28 pixels) to a one-dimensional array (of 28 * 28 = 784 pixels). Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.
After the pixels are flattened, the network consists of a sequence of two tf.keras.layers.Dense layers. These are densely connected, or fully connected, neural layers. The first Dense layer has 128 nodes (or neurons). The second (and last) layer returns a logits array with length of 10. Each node contains a score that indicates the current image belongs to one of the 10 classes.
Compile the model
Before the model is ready for training, it needs a few more settings. These are added during the model's compile step:
Loss function —This measures how accurate the model is during training. You want to minimize this function to "steer" the model in the right direction.
Optimizer —This is how the model is updated based on the data it sees and its loss function.
Metrics —Used to monitor the training and testing steps. The following example uses accuracy, the fraction of the images that are correctly classified.
End of explanation
"""
model.fit(train_images, train_labels, epochs=10)
"""
Explanation: Train the model
Training the neural network model requires the following steps:
Feed the training data to the model. In this example, the training data is in the train_images and train_labels arrays.
The model learns to associate images and labels.
You ask the model to make predictions about a test set—in this example, the test_images array.
Verify that the predictions match the labels from the test_labels array.
Feed the model
To start training, call the model.fit method—so called because it "fits" the model to the training data:
End of explanation
"""
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
"""
Explanation: As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.91 (or 91%) on the training data.
Evaluate accuracy
Next, compare how the model performs on the test dataset:
End of explanation
"""
probability_model = tf.keras.Sequential([model,
tf.keras.layers.Softmax()])
predictions = probability_model.predict(test_images)
"""
Explanation: It turns out that the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy represents overfitting. Overfitting happens when a machine learning model performs worse on new, previously unseen inputs than it does on the training data. An overfitted model "memorizes" the noise and details in the training dataset to a point where it negatively impacts the performance of the model on the new data. For more information, see the following:
* Demonstrate overfitting
* Strategies to prevent overfitting
Make predictions
With the model trained, you can use it to make predictions about some images.
The model's linear outputs, logits. Attach a softmax layer to convert the logits to probabilities, which are easier to interpret.
End of explanation
"""
predictions[0]
"""
Explanation: Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
End of explanation
"""
np.argmax(predictions[0])
"""
Explanation: A prediction is an array of 10 numbers. They represent the model's "confidence" that the image corresponds to each of the 10 different articles of clothing. You can see which label has the highest confidence value:
End of explanation
"""
test_labels[0]
"""
Explanation: So, the model is most confident that this image is an ankle boot, or class_names[9]. Examining the test label shows that this classification is correct:
End of explanation
"""
def plot_image(i, predictions_array, true_label, img):
    """Show test image `i` with its predicted class, confidence, and true class.

    The x label is blue when the prediction matches the true label, red otherwise.
    Relies on the module-level `class_names` list for the class strings.
    """
    true_label, img = true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    color = 'blue' if predicted_label == true_label else 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(predictions_array),
                                         class_names[true_label]),
               color=color)
def plot_value_array(i, predictions_array, true_label):
    """Bar chart of the 10 class probabilities for example `i`.

    The predicted class is colored red first; the true class is then colored
    blue, so a correct prediction shows a single blue bar.
    """
    true_label = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    bars = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    bars[predicted_label].set_color('red')
    bars[true_label].set_color('blue')
"""
Explanation: Graph this to look at the full set of 10 class predictions.
End of explanation
"""
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
"""
Explanation: Verify predictions
With the model trained, you can use it to make predictions about some images.
Let's look at the 0th image, predictions, and prediction array. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percentage (out of 100) for the predicted label.
End of explanation
"""
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
"""
Explanation: Let's plot several images with their predictions. Note that the model can be wrong even when very confident.
End of explanation
"""
# Grab an image from the test dataset.
img = test_images[1]
print(img.shape)
"""
Explanation: Use the trained model
Finally, use the trained model to make a prediction about a single image.
End of explanation
"""
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
"""
Explanation: tf.keras models are optimized to make predictions on a batch, or collection, of examples at once. Accordingly, even though you're using a single image, you need to add it to a list:
End of explanation
"""
predictions_single = probability_model.predict(img)
print(predictions_single)
plot_value_array(1, predictions_single[0], test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
"""
Explanation: Now predict the correct label for this image:
End of explanation
"""
np.argmax(predictions_single[0])
"""
Explanation: keras.Model.predict returns a list of lists—one list for each image in the batch of data. Grab the predictions for our (only) image in the batch:
End of explanation
"""
|
okkhoy/pyDataAnalysis | ml-regression/week1/ML-Regression-W1.ipynb | mit | crime_rate_data = graphlab.SFrame.read_csv('Philadelphia_Crime_Rate_noNA.csv')
crime_rate_data
graphlab.canvas.set_target('ipynb')
crime_rate_data.show(view='Scatter Plot', x = "CrimeRate", y = "HousePrice")
"""
Explanation: Work with Philadelphia crime rate data
The dataset has information about the house prices in Philadelphia, additionally, has information about the crime rates in various neighborhoods. So we can see some interesting observations in this dataset as follows
Load data and do initial analysis
End of explanation
"""
# Simple linear regression: HousePrice ~ CrimeRate, on the full dataset
# (no validation split, quiet output).
crime_model = graphlab.linear_regression.create(crime_rate_data,
                                                target = 'HousePrice',
                                                features = ['CrimeRate'],
                                                validation_set = None,
                                                verbose = False)
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Fit the regression model using crime rate as the feature
End of explanation
"""
# Overlay the raw points ('.') with the fitted regression line ('-').
plt.plot(crime_rate_data['CrimeRate'], crime_rate_data['HousePrice'],
         '.', crime_rate_data['CrimeRate'],
         crime_model.predict(crime_rate_data), '-')
"""
Explanation: Look at the fit of the (initial) model
End of explanation
"""
# Drop the Center City outlier: it is the only row with MilesPhila == 0.0.
crime_rate_data_noCC = crime_rate_data[crime_rate_data['MilesPhila'] != 0.0]
crime_rate_data_noCC.show(view='Scatter Plot', x = "CrimeRate", y = "HousePrice")
"""
Explanation: We can see that there is an outlier in the data, where the crime rate is high, but still, the house price is higher, hence not following the trend. This point is the center of the city (Center City data point)
Remove the Center CIty value, and re do the analysis
Center City is one observation with extremely high crime rate and high house prices. This is an outlier in some sense. So we can remove this and re fit the model
End of explanation
"""
# Refit the same one-feature regression with the Center City outlier removed.
crime_model_withNoCC = graphlab.linear_regression.create(crime_rate_data_noCC,
                                                         target = 'HousePrice',
                                                         features = ['CrimeRate'],
                                                         validation_set = None,
                                                         verbose = False)
"""
Explanation: Notice the difference in the previous scatter plot and this one after removing the outlier (city center)
End of explanation
"""
# Scatter + fitted line for the outlier-free fit.
plt.plot(crime_rate_data_noCC['CrimeRate'], crime_rate_data_noCC['HousePrice'], '.',
         crime_rate_data_noCC['CrimeRate'], crime_model_withNoCC.predict(crime_rate_data_noCC), '-')
"""
Explanation: Look at the fit of the model with outlier removed
End of explanation
"""
# Compare the intercept/slope before and after removing the outlier.
crime_model.get('coefficients')
crime_model_withNoCC.get('coefficients')
"""
Explanation: Compare coefficients for full data fit Vs. data with CenterCity removed
End of explanation
"""
# Additionally drop high-value neighborhoods (price >= 350k) and refit.
crime_rate_data_noHighEnd = crime_rate_data_noCC[crime_rate_data_noCC['HousePrice'] < 350000]
crime_model_noHighEnd = graphlab.linear_regression.create(crime_rate_data_noHighEnd,
                                                          target = 'HousePrice',
                                                          features = ['CrimeRate'],
                                                          validation_set = None,
                                                          verbose = False)
"""
Explanation: Remove high-value outlier neighborhoods and redo analysis
End of explanation
"""
# How much do the coefficients change once high-end neighborhoods are dropped?
crime_model_withNoCC.get('coefficients')
crime_model_noHighEnd.get('coefficients')
"""
Explanation: How much do the coefficients change?
End of explanation
"""
|
Cyb3rWard0g/HELK | docker/helk-jupyter/notebooks/tutorials/02-intro-to-numpy-arrays.ipynb | gpl-3.0 | import array
# Build a typed array of signed ints (type code 'i'); unlike a list,
# it only accepts elements of this one type.
array_one = array.array('i', range(1, 5))
type(array_one)      # -> array.array
type(array_one[0])   # elements come back out as plain Python ints
"""
Explanation: Introduction to Python NumPy Arrays
Goals:
Learn the basics of Python Numpy Arrays
References:
* http://www.numpy.org/
* https://docs.scipy.org/doc/numpy/user/quickstart.html
* https://www.datacamp.com/community/tutorials/python-numpy-tutorial
* https://blog.thedataincubator.com/2018/02/numpy-and-pandas/
* https://medium.com/@ericvanrees/pandas-series-objects-and-numpy-arrays-15dfe05919d7
* https://www.machinelearningplus.com/python/numpy-tutorial-part1-array-python-examples/
* https://towardsdatascience.com/a-hitchhiker-guide-to-python-numpy-arrays-9358de570121
* McKinney, Wes. Python for Data Analysis: Data Wrangling with Pandas, NumPy, and IPython. O'Reilly Media. Kindle Edition
What is NumPy?
NumPy is short for "Numerical Python" and it is a fundamental python package for scientific computing.
It uses a high-performance data structure known as the n-dimensional array or ndarray, a multi-dimensional array object, for efficient computation of arrays and matrices.
What is an Array?
Python arrays are data structures that store data similar to a list, except the type of objects stored in them is constrained.
Elements of an array are all of the same type and indexed by a tuple of positive integers.
The python module array allows you to specify the type of array at object creation time by using a type code, which is a single character. You can read more about each type code here: https://docs.python.org/3/library/array.html?highlight=array#module-array
End of explanation
"""
import numpy as np

np.__version__
# Convert an ordinary Python list into a one-dimensional ndarray.
list_one = [1, 2, 3, 4, 5]
numpy_array = np.asarray(list_one)
type(numpy_array)
numpy_array
"""
Explanation: What is a NumPy N-Dimensional Array (ndarray)?
It is an efficient multidimensional array providing fast array-oriented arithmetic operations.
An ndarray as any other array, it is a container for homogeneous data (Elements of the same type)
In NumPy, data in an ndarray is simply referred to as an array.
As with other container objects in Python, the contents of an ndarray can be accessed and modified by indexing or slicing operations.
For numerical data, NumPy arrays are more efficient for storing and manipulating data than the other built-in Python data structures.
End of explanation
"""
list_two = [1,2,3,4,5]
# The following will throw an error:
# (intentional demo -- a plain list does not support vectorized '+ scalar')
list_two + 2
"""
Explanation: Advantages of NumPy Arrays
Vectorized Operations
The key difference between an array and a list is, arrays are designed to handle vectorized operations while a python list is not.
NumPy operations perform complex computations on entire arrays without the need for Python for loops.
In other words, if you apply a function to an array, it is performed on every item in the array, rather than on the whole array object.
In a python list, you will have to perform a loop over the elements of the list.
End of explanation
"""
# Add 2 to every element, mutating list_two in place (same list object,
# equivalent to the element-by-element loop a vectorized array avoids).
list_two[:] = [value + 2 for value in list_two]
list_two
"""
Explanation: Performing a loop to add 2 to every integer in the list
End of explanation
"""
# The same operation on an ndarray broadcasts over every element.
numpy_array
numpy_array + 2
"""
Explanation: With a NumPy array, you can do the same simply by doing the following:
End of explanation
"""
# Arithmetic and comparison between equal-size arrays apply element-wise.
numpy_array_one = np.array((1, 2))
numpy_array_two = np.array((4, 6))
numpy_array_one + numpy_array_two    # element-wise sum
numpy_array_one > numpy_array_two    # element-wise comparison -> boolean array
"""
Explanation: Any arithmetic operations between equal-size arrays applies the operation element-wise:
End of explanation
"""
import numpy as np
import sys

# Rough memory comparison: a list stores boxed Python int objects while an
# ndarray stores raw machine values.  (getsizeof(1) * len underestimates the
# list -- it ignores the list object itself and its pointer table.)
python_list = list(range(1, 7))
python_list_size = sys.getsizeof(1) * len(python_list)
python_list_size
python_numpy_array = np.asarray(python_list)
python_numpy_array_size = python_numpy_array.itemsize * python_numpy_array.size
python_numpy_array_size
"""
Explanation: Memory.
NumPy internally stores data in a contiguous block of memory, independent of other built-in Python objects.
NumPy arrays takes significantly less amount of memory as compared to python lists.
End of explanation
"""
# One-dimensional indexing and slicing behave just like a Python list.
numpy_array
numpy_array[1]
numpy_array[1:4]
"""
Explanation: Basic Indexing and Slicing
One Dimensional Array
When it comes down to slicing and indexing, one-dimensional arrays are the same as python lists
End of explanation
"""
# An ndarray slice is a VIEW, not a copy: writing through the slice
# also changes the original array (demonstrated below).
numpy_array_slice = numpy_array[1:4]
numpy_array_slice
numpy_array_slice[1] = 10
numpy_array_slice
numpy_array
"""
Explanation: You can slice the array and pass it to a variable. Remember that variables just reference objects.
Any change that you make to the array slice, it will be technnically done on the original array object. Once again, variables just reference objects.
End of explanation
"""
# A 3x3 two-dimensional array; indexing with a single value returns a row.
numpy_two_dimensional_array = np.arange(1, 10).reshape(3, 3)
numpy_two_dimensional_array
numpy_two_dimensional_array[1]
"""
Explanation: Two-Dimensional Array
In a two-dimensional array, elements of the array are one-dimensional arrays
End of explanation
"""
# Chained indexing and comma indexing address the same element.
numpy_two_dimensional_array[1][2]
numpy_two_dimensional_array[1,2]
"""
Explanation: Instead of looping to the one-dimensional arrays to access specific elements, you can just pass a second index value
End of explanation
"""
# Two-dimensional slicing: [row-slice, column-slice].
numpy_two_dimensional_array
numpy_two_dimensional_array[:1]
numpy_two_dimensional_array[:2]
numpy_two_dimensional_array[:3]
numpy_two_dimensional_array[:2,1:]
numpy_two_dimensional_array[:2,:1]
numpy_two_dimensional_array[2][1:]
"""
Explanation: Slicing two-dimensional arrays is a little different than one-dimensional ones.
End of explanation
"""
|
MingChen0919/learning-apache-spark | notebooks/04-miscellaneous/add-python-files-to-spark-cluster.ipynb | mit | from pyspark import SparkConf, SparkContext, SparkFiles
from pyspark.sql import SparkSession
# Create a SparkContext with default configuration.
sc = SparkContext(conf=SparkConf())
"""
Explanation: The SparkContext.addPyFiles() function can be used to add py files. We can define objects and variables in these files and make them available to the Spark cluster.
Create a SparkContext object
End of explanation
"""
# Ship my_module.py to every node in the cluster, then resolve the local
# path it was copied to.
sc.addPyFile('pyFiles/my_module.py')
SparkFiles.get('my_module.py')
"""
Explanation: Add py files
End of explanation
"""
# The shipped file is importable like any module on the driver and workers.
from my_module import *
addPyFiles_is_successfull()
sum_two_variables(4,5)
"""
Explanation: Use my_module.py
We can import my_module as a python module
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_stats_cluster_methods.ipynb | bsd-3-clause | # Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
from scipy import stats
from functools import partial
import matplotlib.pyplot as plt
# this changes hidden MPL vars:
from mpl_toolkits.mplot3d import Axes3D  # noqa
from mne.stats import (spatio_temporal_cluster_1samp_test,
                       bonferroni_correction, ttest_1samp_no_p)
# Fallback for very old scikit-learn installs that shipped as scikits.learn.
try:
    from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
    from scikits.learn.feature_extraction.image import grid_to_graph
print(__doc__)
"""
Explanation: Permutation t-test on toy data with spatial clustering
Following the illustrative example of Ridgway et al. 2012 [1],
this demonstrates some basic ideas behind both the "hat"
variance adjustment method, as well as threshold-free
cluster enhancement (TFCE) [2] methods in mne-python.
This toy dataset consists of a 40 x 40 square with a "signal"
present in the center (at pixel [20, 20]) with white noise
added and a 5-pixel-SD normal smoothing kernel applied.
In the top row plot the T statistic over space, peaking toward the
center. Note that it has peaky edges. Second, with the "hat" variance
correction/regularization, the peak becomes correctly centered. Third,
the TFCE approach also corrects for these edge artifacts. Fourth, the
the two methods combined provide a tighter estimate, for better or
worse.
Now considering multiple-comparisons corrected statistics on these
variables, note that a non-cluster test (e.g., FDR or Bonferroni) would
mis-localize the peak due to sharpness in the T statistic driven by
low-variance pixels toward the edge of the plateau. Standard clustering
(first plot in the second row) identifies the correct region, but the
whole area must be declared significant, so no peak analysis can be done.
Also, the peak is broad. In this method, all significances are
family-wise error rate (FWER) corrected, and the method is
non-parametric so assumptions of Gaussian data distributions (which do
actually hold for this example) don't need to be satisfied. Adding the
"hat" technique tightens the estimate of significant activity (second
plot). The TFCE approach (third plot) allows analyzing each significant
point independently, but still has a broadened estimate. Note that
this is also FWER corrected. Finally, combining the TFCE and "hat"
methods tightens the area declared significant (again FWER corrected),
and allows for evaluation of each point independently instead of as
a single, broad cluster.
<div class="alert alert-info"><h4>Note</h4><p>This example does quite a bit of processing, so even on a
fast machine it can take a few minutes to complete.</p></div>
End of explanation
"""
# Simulation parameters.
width = 40                     # image is width x width pixels
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3  # sigma for the "hat" method
# One-sided t threshold at p < 0.05 with n_subjects - 1 degrees of freedom.
threshold = -stats.t.ppf(0.05, n_subjects - 1)
threshold_tfce = {'start': 0, 'step': 0.2}
n_permutations = 1024  # number of clustering permutations (1024 for exact)
"""
Explanation: Set parameters
End of explanation
"""
n_src = width * width
# Next-neighbor spatial connectivity on the width x width grid.
connectivity = grid_to_graph(width, width)
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(42)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the dead center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
# The 2D smoothing is separable: convolve every row, then every column.
for si in range(X.shape[0]):
    for ri in range(X.shape[1]):
        X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
    for ci in range(X.shape[2]):
        X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
"""
Explanation: Construct simulated data
Make the connectivity matrix just next-neighbor spatially
End of explanation
"""
# Flatten to the shape the cluster test expects: subjects x time(=1) x space.
X = X.reshape((n_subjects, 1, n_src))
"""
Explanation: Do some statistics
<div class="alert alert-info"><h4>Note</h4><p>X needs to be a multi-dimensional array of shape
samples (subjects) x time x space, so we permute dimensions:</p></div>
End of explanation
"""
# Standard one-sample spatio-temporal cluster permutation test.
T_obs, clusters, p_values, H0 = \
    spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
                                       connectivity=connectivity,
                                       tail=1, n_permutations=n_permutations)
# Let's put the cluster data in a readable format
ps = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
    ps[cl[1]] = -np.log10(p)
ps = ps.reshape((width, width))
T_obs = T_obs.reshape((width, width))
# To do a Bonferroni correction on these data is simple:
p = stats.distributions.t.sf(T_obs, n_subjects - 1)
p_bon = -np.log10(bonferroni_correction(p)[1])
# Now let's do some clustering using the standard method with "hat":
# (the "hat" correction regularizes per-pixel variance via sigma)
stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
T_obs_hat, clusters, p_values, H0 = \
    spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
                                       connectivity=connectivity,
                                       tail=1, n_permutations=n_permutations,
                                       stat_fun=stat_fun, buffer_size=None)
# Let's put the cluster data in a readable format
ps_hat = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
    ps_hat[cl[1]] = -np.log10(p)
ps_hat = ps_hat.reshape((width, width))
T_obs_hat = T_obs_hat.reshape((width, width))
# Now the threshold-free cluster enhancement method (TFCE):
# (dict threshold triggers TFCE; p-values come back per point, not per cluster)
T_obs_tfce, clusters, p_values, H0 = \
    spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
                                       connectivity=connectivity,
                                       tail=1, n_permutations=n_permutations)
T_obs_tfce = T_obs_tfce.reshape((width, width))
ps_tfce = -np.log10(p_values.reshape((width, width)))
# Now the TFCE with "hat" variance correction:
T_obs_tfce_hat, clusters, p_values, H0 = \
    spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
                                       connectivity=connectivity,
                                       tail=1, n_permutations=n_permutations,
                                       stat_fun=stat_fun, buffer_size=None)
T_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))
ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
"""
Explanation: Now let's do some clustering using the standard method.
<div class="alert alert-info"><h4>Note</h4><p>Not specifying a connectivity matrix implies grid-like connectivity,
which we want here:</p></div>
End of explanation
"""
# Top row: 3D surfaces of the four T-statistic variants.
fig = plt.figure(facecolor='w')
x, y = np.mgrid[0:width, 0:width]
kwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')
Ts = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]
titles = ['T statistic', 'T with "hat"', 'TFCE statistic', 'TFCE w/"hat" stat']
for ii, (t, title) in enumerate(zip(Ts, titles)):
    ax = fig.add_subplot(2, 4, ii + 1, projection='3d')
    ax.plot_surface(x, y, t, **kwargs)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(title)
# Bottom row: -log10(p) maps, color-clipped between p=0.05 and the best
# p the permutation count can resolve (1/n_permutations).
p_lims = [1.3, -np.log10(1.0 / n_permutations)]
pvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]
titles = ['Standard clustering', 'Clust. w/"hat"',
          'Clust. w/TFCE', 'Clust. w/TFCE+"hat"']
axs = []
for ii, (p, title) in enumerate(zip(pvals, titles)):
    ax = fig.add_subplot(2, 4, 5 + ii)
    plt.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(title)
    axs.append(ax)
plt.tight_layout()
for ax in axs:
    cbar = plt.colorbar(ax=ax, shrink=0.75, orientation='horizontal',
                        fraction=0.1, pad=0.025)
    cbar.set_label('-log10(p)')
    cbar.set_ticks(p_lims)
    cbar.set_ticklabels(['%0.1f' % p for p in p_lims])
plt.show()
"""
Explanation: Visualize results
End of explanation
"""
|
NYUDataBootcamp/Projects | UG_S17/DataBootcamp_Spring2017_finalProject.ipynb | mit | %matplotlib inline
# import necessary packages
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(review): the name `data` imported here is rebound to a DataFrame
# later in the notebook, shadowing pandas_datareader.data.
from pandas_datareader import data
from datetime import datetime
import numpy as np
from textblob import TextBlob
import csv
from wordcloud import WordCloud,ImageColorGenerator
#from scipy.misc import imread
import string
"""
Explanation: Spring 2017 Data Bootcamp Final Project by Colleen Jin dj928, Yingying Chen yc1875
Analysis On Relation Between News Sentiment And Market Portfolio
In this project, we use two sets of data to draw insights on how media sentiment can be an indicator for the financial sector. For the financial data, we plan to use daily return of the market index <font color='green'>(^GSPC)</font>, which is a good indicator for market fluctuation; for media sentiment, we use summarized information of news pieces from top 10 most popular press because of their stronger influence in shaping people's perception of events that are happening in the world.
Both sets of data are real-time, which means the source files are of the moment and need to be loaded each time analysis is performed. The sentiment analysis library returns a <font color='green'>polarity</font> score (-1.0 to 1.0) and a <font color='green'>polarity</font> score (0.0 to 1.0) on the news stories. Using quantified sentiment analysis, we juxtapose the two time series of data and observe if they present any correlation and search for potential causality. For example, we may test the hypothesis that when polarity among the daily news posts is higher (a.k.a., positive), the financial market that same day is more likely to rise. The rest of the notebook is a step-by-step instruction.
Modules used in this notebook:
TextBlob: its library provides an API for common natural language processing <font color='green'>(NLP)</font> tasks, including part-of-speech tagging, noun phrase extraction, sentiment analysis, classification, translation, etc.
Non-Parametric Regression: a category of regression analysis in which the predictor does not take a predetermined form but is constructed according to information derived from the data.
WordCloud
Data sources:
News API: We use a news api provided by NewsAPI.org to load real-time news headlines (in the form of JSON metadata), then apply methods mainly from Python's TextBlob module to conduct sentiment analysis. We seleced 10 publish houses by their popularity (please see the ranking of news press here).
S&P 500 index open and closing price derived from Yahoo Finance.
End of explanation
"""
# Fetch the top-10 headlines (JSON) from each publisher via NewsAPI.org.
# SECURITY NOTE(review): the API key is hard-coded in every URL; it should
# be loaded from an environment variable or config file instead.
cnn = pd.read_json('https://newsapi.org/v1/articles?source=cnn&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518')
nyt= pd.read_json('https://newsapi.org/v1/articles?source=the-new-york-times&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518')
wsp=pd.read_json('https://newsapi.org/v1/articles?source=the-washington-post&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518')
bbc=pd.read_json("https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518")
abc=pd.read_json("https://newsapi.org/v1/articles?source=abc-news-au&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518")
#google = pd.read_json(" https://newsapi.org/v1/articles?source=google-news&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518")
ft = pd.read_json("https://newsapi.org/v1/articles?source=financial-times&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518")
bloomberg = pd.read_json("https://newsapi.org/v1/articles?source=bloomberg&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518")
economist = pd.read_json("https://newsapi.org/v1/articles?source=the-economist&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518")
wsj = pd.read_json("https://newsapi.org/v1/articles?source=the-wall-street-journal&sortBy=top&apiKey=bdc0623102e94a7586137f02a51e0518")
# Stack all publishers into one frame (google is commented out above,
# so this is 9 presses x 10 articles = 90 rows).
total = [wsj, cnn, nyt, wsp, bbc, abc, ft, bloomberg, economist]
total1 = pd.concat(total, ignore_index=True)
total1
"""
Explanation: PART 1: NEWS COLLECTION - pd.read_json()
We use <font color='green'>pd.read_json()</font> to import real-time news information (top 10 posts from each publisher). These news items are stored separately as dataframes and combined into one collective dataframe. (News API powered by NewsAPI.org)**
The news press consists of
* CNN,
* The New York Times,
* Washington Post,
* BBC News,
* ABC News,
* Financial Times,
* Bloomberg.
End of explanation
"""
# Replace missing (None) 'description' and 'title' values with the string
# 'None' so that every downstream step (TextBlob, csv writing, list.append)
# always receives a str and never a NoneType.  Rewritten from manual
# while-counter loops to idiomatic for/range loops; the printed type()
# diagnostics are kept unchanged.
for k in range(len(total1)):
    if total1['articles'][k]['description'] is None:
        total1['articles'][k]['description'] = 'None'
for j in range(len(total1)):
    print(type(total1['articles'][j]['description']))
# now all entries are of type string, regardless whether there is real contents.
for l in range(len(total1)):
    if total1['articles'][l]['title'] is None:
        total1['articles'][l]['title'] = 'None'
for p in range(len(total1)):
    print(type(total1['articles'][p]['title']))
# now all entries are of type string, regardless whether there is real contents.
"""
Explanation: Some values may be missing in the <font color='green'>article</font> column. For example, if there is no imformation of the key <font color='green'>author</font> of news pieces from BBC, it will indicates <font color='green'>None</font> where the <font color='green'>author</font> information should have been. Therefore, we need to convert <font color='green'>Nonetype</font> entries to string type, because the <font color='green'>.append()</font> method for a <font color='green'>list</font> cannot pass values of <font color='green'>Nonetype</font>. We will use <font color='green'>.append()</font> method later for displaying sentiment analysis results.
End of explanation
"""
# write the news posts into a new .csv file
n_rows = len(total1.index)
articles = total1['articles']
# Use a context manager so the handle is flushed and closed deterministically
# (the original leaked it), and newline='' as the csv module requires to
# avoid blank rows on Windows.
# NOTE(review): append mode means re-running this cell adds a duplicate
# header row to result.csv.
with open('result.csv', 'a', newline='') as result_file:
    result = csv.writer(result_file)
    result.writerow(['PublishedAt', 'Title', 'description'])
    for i in range(n_rows):
        article = articles[i]
        result.writerow([article['publishedAt'], article['title'], article['description']])
# print the first item in the 'articles' series as an example.
articles[0]
# type of each entry in the 'articles' column is 'dict'
type(articles[0])
# keys of the 'dict' variables are 'author', 'publishedAt', 'urlToImage', 'description', 'title', 'url'
articles[0].keys()
"""
Explanation: Contents of the column named <font color='green'>articles</font> are of <font color='green'>dict</font> type; each row contains information including <font color='green'>author</font>, <font color='green'>title</font>, <font color='green'>description</font>, <font color='green'>url</font>, <font color='green'>urlToImage</font> and <font color='green'>publishedAt</font>, among which <font color='green'>title</font> is selected for main analysis.
End of explanation
"""
# Part-of-speech tagging of the first headline (e.g. NNP = proper noun).
blob = TextBlob(str(articles[0]['title']))
blob.tags
"""
Explanation: The <font color='green'>tags</font> method performs part-of-speech tagging (for example, <font color='green'>NNP</font> stands for a singular proper noun).
End of explanation
"""
# Print every headline as "<n>. <title>" (1-based numbering).
# Rewritten from a manual while-counter loop to for/range; output unchanged.
for i in range(n_rows):
    blob = TextBlob(articles[i]['title'])
    print(1 + i, ". ", blob, sep = "")
"""
Explanation: A loop prints all the news titles, which are later used for sentiment analysis.
End of explanation
"""
# Print every description as "<n>. <description>" (1-based numbering).
# Rewritten from a manual while-counter loop to for/range; output unchanged.
for j in range(n_rows):
    blob1 = TextBlob(str(articles[j]['description']))
    print(1 + j, ". ", blob1, sep = "")
"""
Explanation: All descriptions for the 100 news posts are printed in the same way as above; their presence is useful for adding accuracy for our sentiment analysis by providing more words on the same topic as the titles.
End of explanation
"""
# Build a word cloud from the article descriptions saved in result.csv.
# Fixes over the original: both file handles were leaked, and
# entire_text.txt was re-opened for reading BEFORE the write handle was
# closed, so buffered text could be missing from the read-back.
list_of_text = []
with open('result.csv', 'r') as csv_file:
    for row in csv.reader(csv_file):
        # row[2] is the description column; it is already a str, so the
        # original encode('utf-8')/decode() round-trip was a no-op.
        list_of_text.append(row[2])
with open('entire_text.txt', 'w') as texts:
    texts.writelines(list_of_text)
with open('entire_text.txt', 'r') as text_file:
    text = text_file.read()
wordcloud = WordCloud().generate(text)
#display the generated image
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
# increase max_font_size and change backgroud color to white
wordcloud = WordCloud(max_font_size=40).generate(text)
wordcloud = WordCloud(max_words=200,background_color='white',max_font_size=100).generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
"""
Explanation: PART 2: WORD CLOUD
A word cloud of news tiltles can provide us with a direct and vivid impression of the most frequently discussed topics in today's news reports. Topic/person/event that prevails among the top news pieces appears in the largest font, occupies the center space and displays the most salient colors.
In a visually pleasant way, a word cloud gives us a hint for the news sentiment of the day.
Code referred to https://github.com/amueller/word_cloud/blob/master/examples/simple.py
End of explanation
"""
# a loop to show sentiment analysis results of the 100 titles
# (Sentiment(polarity, subjectivity) per headline); rewritten from a manual
# while-counter loop to for/range, output unchanged.
for n in range(n_rows):
    print(TextBlob(articles[n]['title']).sentiment)
"""
Explanation: PART 3: SENTIMENT ANALYSIS
We use <font color='green'>.sentiment</font> method from <font color='green'>TextBlob</font> to calculate polatiry and subjectivity of each <font color='green'>title</font>.
The <font color='green'>sentiment</font> property returns an output in the form of <font color='green'>namedtuple</font> (Sentiment(polarity, subjectivity)). The polarity score is a float within the range [-1.0, 1.0]. The subjectivity is a float within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective.
End of explanation
"""
# Collect the Sentiment namedtuple (polarity, subjectivity) for every
# headline; a comprehension replaces the original index-counter while loop.
tests_title = [TextBlob(articles[N]['title']).sentiment for N in range(n_rows)]
"""
Explanation: From the TextBlob module, the <font color='green'>.sentiment</font> method returns results in the form of <font color='green'>namedtuples</font>. Elements in <font color='green'>namedtuples</font> can only be printed after being appended into the form of a <font color='green'>list</font>. Therefore, we use a <font color='green'>list</font> named <font color='green'>tests_title</font> to store all the results from our sentiment tests on the news titles.
End of explanation
"""
# this list contains all titles polarity scores.
list_polarity_title = [test.polarity for test in tests_title]
"""
Explanation: We create a list named <font color='green'>list_polarity_title</font> to store polarity scores for news titles.
End of explanation
"""
# this list contains all titles subjectivity scores.
list_subjectivity_title = [test.subjectivity for test in tests_title]
"""
Explanation: Similarly, we create a list of subjectivity scores for news titles.
End of explanation
"""
# Print each description's sentiment, then collect the namedtuples for the
# scoring lists below; for/range and a comprehension replace the original
# manual while-counter loops (same output, same resulting list).
for m in range(n_rows):
    print(TextBlob(articles[m]['description']).sentiment)
tests_description = [TextBlob(articles[M]['description']).sentiment
                     for M in range(n_rows)]
"""
Explanation: 'description'
We use <font color='green'>.sentiment</font> method again to calculate <font color='green'>polarity</font> and <font color='green'>subjectivity</font> of each <font color='green'>description</font>. As mentioned above, analysis on descritions make the final results more versatile and hopefully more accurate.
End of explanation
"""
# this list contains all descriptions' polarity scores.
list_polarity_description = [test.polarity for test in tests_description]
"""
Explanation: We create a list of polarity scores for news descriptions by appending each polarity score to the list named <font color='green'>list_polarity_description</font>.
End of explanation
"""
# this list contains all descriptions' subjectivity scores.
list_subjectivity_description = [test.subjectivity for test in tests_description]
"""
Explanation: Same as above, we create a list of subjectivity for news descriptions.
End of explanation
"""
# Assemble the four score lists into one DataFrame, one labeled row each.
total_score = [list_polarity_title, list_subjectivity_title, list_polarity_description, list_subjectivity_description]
labels = ['T_polarity', 'T_subjectivity', 'D_polarity', 'D_subjectivity']
df = pd.DataFrame.from_records(total_score, index = labels)
df
"""
Explanation: Now we have four lists of data:
1. list_polarity_title
2. list_subjectivity_title
3. list_polarity_description
4. list_subjectivity_description
We convert the four lists of data into one dataframe for drawing plots.
End of explanation
"""
# Transpose so each article is a row and each score type a column (the
# orientation .plot() expects).
df = df.transpose()
df
# this plot shows scores for all 100 news posts.
df.plot()
"""
Explanation: We transpose the dataframe to make it compatible with the .plot() method.
End of explanation
"""
# Aggregate the per-article scores into one total per press.  Articles were
# concatenated 10-per-publisher, so summing each consecutive run of 10 rows
# gives a per-press total.  Fixes over the original four copy-pasted while
# loops: they shadowed the builtin `sum`, duplicated the same logic, and
# would raise on the last chunk if n_rows were not an exact multiple of 10.
def _press_totals(scores, chunk_size=10):
    """Return the sums of consecutive chunk_size-long runs of `scores`."""
    values = list(scores)
    return [sum(values[i:i + chunk_size])
            for i in range(0, len(values), chunk_size)]

c_T_polarity = df['T_polarity']
new_T_polarity = _press_totals(c_T_polarity)
new_T_polarity
# The press are in the order they were concatenated:
# wsj, cnn, nyt, wsp, bbc, abc, ft, bloomberg, economist.
c_T_subjectivity = df['T_subjectivity']
new_T_subjectivity = _press_totals(c_T_subjectivity)
new_T_subjectivity
c_D_polarity = df['D_polarity']
new_D_polarity = _press_totals(c_D_polarity)
new_D_polarity
c_D_subjectivity = df['D_subjectivity']
new_D_subjectivity = _press_totals(c_D_subjectivity)
new_D_subjectivity
total_score_bypublishhouse = [new_T_polarity, new_T_subjectivity, new_D_polarity, new_D_subjectivity]
df1 = pd.DataFrame.from_records(total_score_bypublishhouse, index = labels)
df1
# change the column labels to press house.
# NOTE(review): 'guardian' here does not match the 'bbc-news' source actually
# fetched above -- confirm which press the 5th column represents.
new_columns = ['wsj', 'cnn', 'nyt', 'wsp', 'guardian', 'abc', 'ft', 'bloomberg', 'economist']
df1.columns = new_columns
df1
"""
Explanation: -Analysis by news press
Apparently, the 100 news posts standing alone aren't of much information. For a better perspective, we need to group scores by the press they belong to, under the assumption that posts from the same press are much more likely to embed a uniform tone. We create a list names <font color='green'>new_T_polarity</font> to store the sum of polarity scores of news titles for each press. The we do the same operation on subjectivity scores.
End of explanation
"""
#colors = [(x/10.0, x/20.0, 0.75) for x in range(n_rows)]
# Grouped bar chart of all four score types per press.
df1.plot(kind = 'bar', legend = True, figsize = (15, 2), colormap='Paired', grid = True)
# place the legend above the subplot and use all the expended width.
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=10, mode="expand", borderaxespad=0.)
bar_color = 'orange'
# First row of df1 = the title-polarity totals per press.
row = df1.iloc[0]
row.plot(kind = 'bar', title = "Polarity for news titles by news press", color = bar_color, grid = True)
"""
Explanation: Graph for scores by news press
End of explanation
"""
# Score every archived article (all_news.csv) and write Date,polarity rows.
# Fixes over the original: both handles were leaked, entire_result.csv was
# handed to pd.read_csv BEFORE the write handle was closed (buffered rows
# could be missing), and newline='' was absent (blank rows on Windows).
with open('all_news.csv', 'r', encoding="ISO-8859-1") as news_file, \
     open('entire_result.csv', 'w', newline='') as result_file:
    contents = csv.reader(news_file)
    result = csv.writer(result_file)
    result.writerow(['Date', 'polarity'])
    for row in contents:
        # row[2] is the article text; row[0] its publication date.
        polarity = TextBlob(row[2]).sentiment.polarity
        result.writerow([row[0], polarity])
data = pd.read_csv('entire_result.csv')
data
# group the data by date: one mean polarity per day
data = data.groupby('Date', as_index=False)['polarity'].mean()
# convert column "Date" to a date data type
data['Date'] = pd.to_datetime(data['Date'])
# sort the data by date ascending
data = data.sort_values(by="Date", ascending=True)
data
"""
Explanation: -Analysis by date
We have loaded news titles and descriptions over 2 weeks and stored them in a csv file called all_news.csv. We then calculated an average news polarity score for each day. We then graph the news polarity score to see how it has changed over time.
End of explanation
"""
# Bar chart of the daily average title polarity.
# NOTE(review): x is passed the Series itself rather than the column label
# "Date"; some pandas versions reject this -- confirm x="Date" wasn't intended.
data.plot(x=data["Date"],kind = 'bar',title='Polarity for news titles by date',grid = True, color = 'orange')
"""
Explanation: Graph for scores by date
End of explanation
"""
from yahoo_finance import Share
# '^GSPC' is the market symbol for the S&P 500 Index
yahoo = Share('^GSPC')
print(yahoo.get_open())
print(yahoo.get_price())
print(yahoo.get_trade_datetime())
from pprint import pprint
# Daily OHLC records for a one-month window (inclusive date strings).
pprint(yahoo.get_historical('2017-04-09', '2017-05-09'))
"""
Explanation: Part 4: S&P 500 INDEX
Using the <font color='green'>yahoo_finance</font> module in Python, we will eventually compare the sentiment analysis of the news posts with the movement of the market index.
End of explanation
"""
from yahoo_finance import Share
yahoo = Share('^GSPC')
dataset = yahoo.get_historical('2017-04-27', '2017-05-09')
# newline='' is required for file objects handed to csv.writer, and the
# with-block flushes/closes yahoo.csv before pd.read_csv reads it back
# (the original left the file object dangling open, risking a truncated read).
with open('yahoo.csv', 'w', newline='') as yahoo_file:
    result = csv.writer(yahoo_file)
    result.writerow(['Date', 'Low', 'High'])
    # Iterate the records directly instead of indexing by position.
    for record in dataset:
        result.writerow([record['Date'], record['Low'], record['High']])
yahoo = pd.read_csv('yahoo.csv')
yahoo
# convert column "Date" to a date data type
yahoo['Date'] = pd.to_datetime(yahoo['Date'])
# sort the data by date ascending
yahoo = yahoo.sort_values(by="Date", axis=0, ascending=True, inplace=False, kind='quicksort')
yahoo
# NOTE(review): type(...) on a column returns pandas.Series, not the element
# dtype; use data['Date'].dtype if the element type was intended.
type(data['Date'])
type(yahoo['Date'])
"""
Explanation: We create a .csv file called yahoo.csv to store the financial data upon each import.
End of explanation
"""
#join yahoo and data together on "Date"
# Inner join: keeps only the dates present in both the sentiment and index data.
result = pd.merge(data, yahoo,on='Date')
result
result_len = len(result)
yahoo.plot(x="Date",figsize=(6, 2),title='Yahoo Finance')
data.plot(x='Date',figsize=(6, 2),title='News Title Polarity')
"""
Explanation: PART 5 CORRELATION BETWEEN NEWS POLARITY AND S&P 500
End of explanation
"""
import numpy
low=result['Low']
high=result['High']
polarity=result['polarity']
# 2x2 Pearson correlation matrix; the off-diagonal entry is the coefficient.
numpy.corrcoef(low, polarity)
#from the data we have, we can conclude that news polarity and S&P500 index are positively correlated
numpy.corrcoef(high, polarity)
numpy.corrcoef(high, low)
#a scatterplot for news polarity and Yahoo daily return of the market index
result.plot.scatter(x="polarity", y="Low")
"""
Explanation: Estimate correlation between polarity scores and S&P500 index
End of explanation
"""
#a parametric estimation for Yahoo daily return by news polarity
import seaborn as sns
#lmplot plots the data with the regression coefficient through it.
# Bug fix: seaborn's `ci` is a percentage in [0, 100]; the original ci=0.95
# drew an (effectively invisible) 0.95% band instead of the intended 95%.
sns.lmplot(x="polarity", y="Low", data=result, ci=95) #ci stands for confidence interval
"""
Explanation: A parametric estimation for Yahoo daily return by news polarity
End of explanation
"""
import pyqt_fit.nonparam_regression as smooth
from pyqt_fit import npr_methods
# Kernel-style spatial-average smoother: polarity -> Low, no parametric form.
k0 = smooth.NonParamRegression(polarity, low, method=npr_methods.SpatialAverage())
k0.fit()
# Evaluation grid covering the observed polarity range, step 0.01.
grid = np.r_[-0.05:0.05:0.01]
plt.plot(grid, k0(grid), label="Spatial Averaging", linewidth=2)
plt.legend(loc='best')
"""
Explanation: A non-parametric estimation for Yahoo daily return by news polarity
End of explanation
"""
|
physion/ovation-python | examples/requisition-import-from-csv.ipynb | gpl-3.0 | import dateutil.parser
import csv
from ovation.session import connect_lab
from tqdm import tqdm_notebook as tqdm
"""
Explanation: Requisition import
This example demonstrates importing requisition(s) from a CSV file
Setup
End of explanation
"""
# Prompt for credentials and open an authenticated Ovation lab session.
user = input('Email: ')
s = connect_lab(user, api='https://lab-services-staging.ovation.io/') # use api='https://lab-services.ovation.io' for production
"""
Explanation: Session
End of explanation
"""
def get_project_by_name(s, project_name, organization_id=0):
    """Return the first project in the organization whose name equals project_name.

    Raises IndexError when no project matches (same as the original lookup).
    """
    response = s.get(s.path('project'),
                     params={'organization_id': organization_id})
    matching = [p for p in response.projects if p.name == project_name]
    return matching[0]
def create_container(s, organization_id, container_type, container_barcode=None):
    """Create a sample container via the API and return the server's response.

    The barcode is only included in the payload when one was supplied.
    """
    payload = {'type': container_type}
    if container_barcode is not None:
        payload['barcode'] = container_barcode
    return s.post(s.path('container'),
                  data={'container': payload},
                  params={'organization_id': organization_id})
def create_requisition(s, project=None,
                       organization_id=0,
                       identifier=None,
                       template_name=None,
                       container_type='Tube',
                       container_barcode=None,
                       container_position='A01',
                       custom_attributes=None,
                       sample_date_received=None,
                       patient_mrn=None,
                       patient_gender=None,
                       patient_dob=None,
                       patient_first_name=None,
                       patient_last_name=None,
                       panel=None,
                       physician_email=None,
                       sample_collection_date=None
                       ):
    """Create a requisition (and its sample container) via the Ovation API.

    Parameters mirror the requisition JSON payload: `panel` must be a panel
    code, and the date arguments should already be ISO-8601 strings.
    Returns the API response for the created requisition.
    """
    # Mutable-default fix: build a fresh dict per call instead of sharing
    # one module-level {} across every invocation.
    if custom_attributes is None:
        custom_attributes = {}

    # Create sample container
    container = create_container(s, organization_id, container_type, container_barcode)

    # Create the requisition
    sample_data = {"identifier": identifier,
                   "sample_states": [{"container_id": container.id,
                                      "position": container_position}]}
    if sample_date_received:
        sample_data["date_received"] = sample_date_received
    else:
        sample_data["received"] = False

    # Bug fix: the original compared the *bound method* patient_gender.lower
    # (not its result) to 'f'/'m', which is always False, so one-letter
    # gender codes were silently dropped. Also guard against the None default,
    # which previously raised AttributeError.
    gender = {}
    if patient_gender:
        normalized_gender = patient_gender.lower()
        if normalized_gender in ('f', 'female'):
            gender['female'] = True
        elif normalized_gender in ('m', 'male'):
            gender['male'] = True

    requisition_data = {"identifier": identifier,  # Any unique (within organization) identifier
                        "template": template_name,
                        "custom_attributes": custom_attributes,
                        "sample_collection_date": sample_collection_date,
                        "samples": [sample_data],
                        "patient": {
                            "identifier": patient_mrn,
                            "first_name": patient_first_name,
                            "last_name": patient_last_name,
                            "date_of_birth": patient_dob,
                            "gender": gender
                        },
                        "requested_tests": {
                            panel: True
                        },
                        "complete": False,
                        "organization_id": organization_id,
                        "project_id": project.id
                        }
    if physician_email:
        requisition_data["physician"] = {"contact_email": physician_email}

    req = s.post(s.path('requisition'),
                 data={'requisition': requisition_data},
                 params={'organization_id': organization_id,
                         "project_id": project.id})
    return req
def import_requisition(s, row, organization_id=0, template_name=None, project=None, physician_email=None):
    """Translate one CSV row (a dict from csv.DictReader) into a requisition.

    Empty 'dob' / 'collect date' cells become None; parseable dates are
    normalized to ISO-8601. Returns the created requisition from the API.
    """
    assert project is not None
    assert template_name is not None
    assert row is not None

    identifier = row['accession']
    # Bug fix: the original bound container_type = 'Tube', and
    # container_position = 'A01', with trailing commas, creating unused
    # 1-tuples; the intended literals are passed directly below instead.
    container_barcode = row['tube no']
    custom_attributes = {'date shipped': row['date shipped']}  # Add Institution, Department, etc.

    patient_mrn = row['mrn']
    patient_gender = row['gender']
    dob = dateutil.parser.parse(row['dob']) if len(row['dob']) > 0 else None
    patient_dob = dob.date().isoformat() if dob else None
    patient_first_name = row['first name']
    patient_last_name = row['last name']

    panel = row['test mnemonic']  # Must be a panel code!

    collected = dateutil.parser.parse(row['collect date']) if len(row['collect date']) > 0 else None
    sample_collection_date = collected.date().isoformat() if collected else None

    return create_requisition(s, project,
                              organization_id=organization_id,
                              identifier=identifier,
                              template_name=template_name,
                              container_type='Tube',
                              container_barcode=container_barcode,
                              container_position='A01',
                              custom_attributes=custom_attributes,
                              sample_date_received=None,
                              patient_mrn=patient_mrn,
                              patient_gender=patient_gender,
                              patient_dob=patient_dob,
                              patient_first_name=patient_first_name,
                              patient_last_name=patient_last_name,
                              panel=panel,
                              physician_email=physician_email,
                              sample_collection_date=sample_collection_date)
"""
Explanation: Functions
Several functions that we'll use for the import…
End of explanation
"""
# Gather run parameters interactively.
organization_id = int(input('Organization ID: '))
csv_path = input('CSV path: ')
template_name = input("Requisition template name: ")
project_name = input('Project name: ')
project = get_project_by_name(s, project_name, organization_id=organization_id)
# One requisition per CSV row; tqdm renders a progress bar in the notebook.
with open(csv_path, newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in tqdm(reader, unit='requisitions'):
        import_requisition(s, row, organization_id=organization_id, template_name=template_name, project=project, physician_email=None)
"""
Explanation: Import
Collect required information, then import each requisition in the provided CSV.
End of explanation
"""
|
chengwliu/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/Ch2_MorePyMC_PyMC2.ipynb | mit | import pymc as pm
parameter = pm.Exponential("poisson_param", 1)
data_generator = pm.Poisson("data_generator", parameter)
data_plus_one = data_generator + 1
"""
Explanation: Chapter 2
This chapter introduces more PyMC syntax and design patterns, and ways to think about how to model a system from a Bayesian perspective. It also contains tips and data visualization techniques for assessing goodness-of-fit for your Bayesian model.
A little more on PyMC
Parent and Child relationships
To assist with describing Bayesian relationships, and to be consistent with PyMC's documentation, we introduce parent and child variables.
parent variables are variables that influence another variable.
child variable are variables that are affected by other variables, i.e. are the subject of parent variables.
A variable can be both a parent and child. For example, consider the PyMC code below.
End of explanation
"""
# Inspect the parent/child links PyMC records between variables.
print("Children of `parameter`: ")
print(parameter.children)
print("\nParents of `data_generator`: ")
print(data_generator.parents)
print("\nChildren of `data_generator`: ")
print(data_generator.children)
"""
Explanation: parameter controls the parameter of data_generator, hence influences its values. The former is a parent of the latter. By symmetry, data_generator is a child of parameter.
Likewise, data_generator is a parent to the variable data_plus_one (hence making data_generator both a parent and child variable). Although it does not look like one, data_plus_one should be treated as a PyMC variable as it is a function of another PyMC variable, hence is a child variable to data_generator.
This nomenclature is introduced to help us describe relationships in PyMC modeling. You can access a variable's children and parent variables using the children and parents attributes attached to variables.
End of explanation
"""
# .value exposes each variable's current (possibly random) internal value.
print("parameter.value =", parameter.value)
print("data_generator.value =", data_generator.value)
print("data_plus_one.value =", data_plus_one.value)
"""
Explanation: Of course a child can have more than one parent, and a parent can have many children.
PyMC Variables
All PyMC variables also expose a value attribute. This method produces the current (possibly random) internal value of the variable. If the variable is a child variable, its value changes given the variable's parents' values. Using the same variables from before:
End of explanation
"""
lambda_1 = pm.Exponential("lambda_1", 1)  # prior on first behaviour
lambda_2 = pm.Exponential("lambda_2", 1)  # prior on second behaviour
tau = pm.DiscreteUniform("tau", lower=0, upper=10)  # prior on behaviour change
print("lambda_1.value = %.3f" % lambda_1.value)
print("lambda_2.value = %.3f" % lambda_2.value)
print("tau.value = %.3f" % tau.value, "\n")
# random() draws a fresh value for each variable and stores it in .value.
lambda_1.random(), lambda_2.random(), tau.random()
print("After calling random() on the variables...")
print("lambda_1.value = %.3f" % lambda_1.value)
print("lambda_2.value = %.3f" % lambda_2.value)
print("tau.value = %.3f" % tau.value)
"""
Explanation: PyMC is concerned with two types of programming variables: stochastic and deterministic.
stochastic variables are variables that are not deterministic, i.e., even if you knew all the values of the variables' parents (if it even has any parents), it would still be random. Included in this category are instances of classes Poisson, DiscreteUniform, and Exponential.
deterministic variables are variables that are not random if the variables' parents were known. This might be confusing at first: a quick mental check is if I knew all of variable foo's parent variables, I could determine what foo's value is.
We will detail each below.
Initializing Stochastic variables
Initializing a stochastic variable requires a name argument, plus additional parameters that are class specific. For example:
some_variable = pm.DiscreteUniform("discrete_uni_var", 0, 4)
where 0, 4 are the DiscreteUniform-specific lower and upper bound on the random variable. The PyMC docs contain the specific parameters for stochastic variables. (Or use object??, for example pm.DiscreteUniform?? if you are using IPython!)
The name attribute is used to retrieve the posterior distribution later in the analysis, so it is best to use a descriptive name. Typically, I use the Python variable's name as the name.
For multivariable problems, rather than creating a Python array of stochastic variables, addressing the size keyword in the call to a Stochastic variable creates multivariate array of (independent) stochastic variables. The array behaves like a Numpy array when used like one, and references to its value attribute return Numpy arrays.
The size argument also solves the annoying case where you may have many variables $\beta_i, \; i = 1,...,N$ you wish to model. Instead of creating arbitrary names and variables for each one, like:
beta_1 = pm.Uniform("beta_1", 0, 1)
beta_2 = pm.Uniform("beta_2", 0, 1)
...
we can instead wrap them into a single variable:
betas = pm.Uniform("betas", 0, 1, size=N)
Calling random()
We can also call on a stochastic variable's random() method, which (given the parent values) will generate a new, random value. Below we demonstrate this using the texting example from the previous chapter.
End of explanation
"""
# Adding two stochastics yields a pymc deterministic object, not a number.
type(lambda_1 + lambda_2)
"""
Explanation: The call to random stores a new value into the variable's value attribute. In fact, this new value is stored in the computer's cache for faster recall and efficiency.
Warning: Don't update stochastic variables' values in-place.
Straight from the PyMC docs, we quote [4]:
Stochastic objects' values should not be updated in-place. This confuses PyMC's caching scheme... The only way a stochastic variable's value should be updated is using statements of the following form:
A.value = new_value
The following are in-place updates and should never be used:
A.value += 3
A.value[2,1] = 5
A.value.attribute = new_attribute_value
Deterministic variables
Since most variables you will be modeling are stochastic, we distinguish deterministic variables with a pymc.deterministic wrapper. (If you are unfamiliar with Python wrappers (also called decorators), that's no problem. Just prepend the pymc.deterministic decorator before the variable declaration and you're good to go. No need to know more. ) The declaration of a deterministic variable uses a Python function:
@pm.deterministic
def some_deterministic_var(v1=v1,):
#jelly goes here.
For all purposes, we can treat the object some_deterministic_var as a variable and not a Python function.
Prepending with the wrapper is the easiest way, but not the only way, to create deterministic variables: elementary operations, like addition, exponentials etc. implicitly create deterministic variables. For example, the following returns a deterministic variable:
End of explanation
"""
import numpy as np
n_data_points = 5  # in CH1 we had ~70 data points
@pm.deterministic
def lambda_(tau=tau, lambda_1=lambda_1, lambda_2=lambda_2):
    """Piecewise rate vector: lambda_1 before the switchpoint tau, lambda_2 after."""
    out = np.zeros(n_data_points)
    out[:tau] = lambda_1  # lambda before tau is lambda1
    out[tau:] = lambda_2  # lambda after tau is lambda2
    return out
"""
Explanation: The use of the deterministic wrapper was seen in the previous chapter's text-message example. Recall the model for $\lambda$ looked like:
$$
\lambda =
\begin{cases}
\lambda_1 & \text{if } t \lt \tau \cr
\lambda_2 & \text{if } t \ge \tau
\end{cases}
$$
And in PyMC code:
End of explanation
"""
%matplotlib inline
from IPython.core.pylabtools import figsize
from matplotlib import pyplot as plt
figsize(12.5, 4)
# 20k prior draws of lambda_1 to visualize the Exp(1) prior.
samples = [lambda_1.random() for i in range(20000)]
# NOTE(review): `normed` was removed in matplotlib >= 3.1; newer versions
# need density=True instead.
plt.hist(samples, bins=70, normed=True, histtype="stepfilled")
plt.title("Prior distribution for $\lambda_1$")
plt.xlim(0, 8);
"""
Explanation: Clearly, if $\tau, \lambda_1$ and $\lambda_2$ are known, then $\lambda$ is known completely, hence it is a deterministic variable.
Inside the deterministic decorator, the Stochastic variables passed in behave like scalars or Numpy arrays (if multivariable), and not like Stochastic variables. For example, running the following:
@pm.deterministic
def some_deterministic(stoch=some_stochastic_var):
return stoch.value**2
will return an AttributeError detailing that stoch does not have a value attribute. It simply needs to be stoch**2. During the learning phase, it's the variable's value that is repeatedly passed in, not the actual variable.
Notice in the creation of the deterministic function we added defaults to each variable used in the function. This is a necessary step, and all variables must have default values.
Including observations in the Model
At this point, it may not look like it, but we have fully specified our priors. For example, we can ask and answer questions like "What does my prior distribution of $\lambda_1$ look like?"
End of explanation
"""
data = np.array([10, 5])
# observed=True freezes .value at the supplied data; random() won't change it.
fixed_variable = pm.Poisson("fxd", 1, value=data, observed=True)
print("value: ", fixed_variable.value)
print("calling .random()")
fixed_variable.random()
print("value: ", fixed_variable.value)
"""
Explanation: To frame this in the notation of the first chapter, though this is a slight abuse of notation, we have specified $P(A)$. Our next goal is to include data/evidence/observations $X$ into our model.
PyMC stochastic variables have a keyword argument observed which accepts a boolean (False by default). The keyword observed has a very simple role: fix the variable's current value, i.e. make value immutable. We have to specify an initial value in the variable's creation, equal to the observations we wish to include, typically an array (and it should be an Numpy array for speed). For example:
End of explanation
"""
# We're using some fake data here
data = np.array([10, 25, 15, 20, 35])
# Observed Poisson likelihood with the piecewise rate lambda_ defined above.
obs = pm.Poisson("obs", lambda_, value=data, observed=True)
print(obs.value)
"""
Explanation: This is how we include data into our models: initializing a stochastic variable to have a fixed value.
To complete our text message example, we fix the PyMC variable observations to the observed dataset.
End of explanation
"""
# Bundle all model variables so they can be analyzed as a single unit.
model = pm.Model([obs, lambda_, lambda_1, lambda_2, tau])
"""
Explanation: Finally...
We wrap all the created variables into a pm.Model class. With this Model class, we can analyze the variables as a single unit. This is an optional step, as the fitting algorithms can be sent an array of the variables rather than a Model class. I may or may not use this class in future examples ;)
End of explanation
"""
# Sample a switchpoint uniformly over the 80-day span.
tau = pm.rdiscrete_uniform(0, 80)
print(tau)
"""
Explanation: Modeling approaches
A good starting point in Bayesian modeling is to think about how your data might have been generated. Put yourself in an omniscient position, and try to imagine how you would recreate the dataset.
In the last chapter we investigated text message data. We begin by asking how our observations may have been generated:
We started by thinking "what is the best random variable to describe this count data?" A Poisson random variable is a good candidate because it can represent count data. So we model the number of sms's received as sampled from a Poisson distribution.
Next, we think, "Ok, assuming sms's are Poisson-distributed, what do I need for the Poisson distribution?" Well, the Poisson distribution has a parameter $\lambda$.
Do we know $\lambda$? No. In fact, we have a suspicion that there are two $\lambda$ values, one for the earlier behaviour and one for the latter behaviour. We don't know when the behaviour switches though, but call the switchpoint $\tau$.
What is a good distribution for the two $\lambda$s? The exponential is good, as it assigns probabilities to positive real numbers. Well the exponential distribution has a parameter too, call it $\alpha$.
Do we know what the parameter $\alpha$ might be? No. At this point, we could continue and assign a distribution to $\alpha$, but it's better to stop once we reach a set level of ignorance: whereas we have a prior belief about $\lambda$, ("it probably changes over time", "it's likely between 10 and 30", etc.), we don't really have any strong beliefs about $\alpha$. So it's best to stop here.
What is a good value for $\alpha$ then? We think that the $\lambda$s are between 10-30, so if we set $\alpha$ really low (which corresponds to larger probability on high values) we are not reflecting our prior well. Similarly, a too-high $\alpha$ misses our prior belief as well. A good idea for $\alpha$, to reflect our belief, is to set the value so that the mean of $\lambda$, given $\alpha$, is equal to our observed mean. This was shown in the last chapter.
We have no expert opinion of when $\tau$ might have occurred. So we will suppose $\tau$ is from a discrete uniform distribution over the entire timespan.
Below we give a graphical visualization of this, where arrows denote parent-child relationships. (provided by the Daft Python library )
<img src="http://i.imgur.com/7J30oCG.png" width = 700/>
PyMC, and other probabilistic programming languages, have been designed to tell these data-generation stories. More generally, B. Cronin writes [5]:
Probabilistic programming will unlock narrative explanations of data, one of the holy grails of business analytics and the unsung hero of scientific persuasion. People think in terms of stories - thus the unreasonable power of the anecdote to drive decision-making, well-founded or not. But existing analytics largely fails to provide this kind of story; instead, numbers seemingly appear out of thin air, with little of the causal context that humans prefer when weighing their options.
Same story; different ending.
Interestingly, we can create new datasets by retelling the story.
For example, if we reverse the above steps, we can simulate a possible realization of the dataset.
1. Specify when the user's behaviour switches by sampling from $\text{DiscreteUniform}(0, 80)$:
End of explanation
"""
alpha = 1. / 20.
# Two independent Exp(alpha) draws: the before/after message rates.
lambda_1, lambda_2 = pm.rexponential(alpha, 2)
print(lambda_1, lambda_2)
"""
Explanation: 2. Draw $\lambda_1$ and $\lambda_2$ from an $\text{Exp}(\alpha)$ distribution:
End of explanation
"""
# Poisson counts: rate lambda_1 for the tau days before the switch,
# lambda_2 for the remaining 80 - tau days.
data = np.r_[pm.rpoisson(lambda_1, tau), pm.rpoisson(lambda_2, 80 - tau)]
"""
Explanation: 3. For days before $\tau$, represent the user's received SMS count by sampling from $\text{Poi}(\lambda_1)$, and sample from $\text{Poi}(\lambda_2)$ for days after $\tau$. For example:
End of explanation
"""
plt.bar(np.arange(80), data, color="#348ABD")
# Highlight the (known, since we generated it) switch day in red.
plt.bar(tau - 1, data[tau - 1], color="r", label="user behaviour changed")
plt.xlabel("Time (days)")
plt.ylabel("count of text-msgs received")
plt.title("Artificial dataset")
plt.xlim(0, 80)
plt.legend();
"""
Explanation: 4. Plot the artificial dataset:
End of explanation
"""
def plot_artificial_sms_dataset():
    """Draw one simulated 80-day SMS dataset into the current axes."""
    tau = pm.rdiscrete_uniform(0, 80)
    alpha = 1. / 20.
    lambda_1, lambda_2 = pm.rexponential(alpha, 2)
    data = np.r_[pm.rpoisson(lambda_1, tau), pm.rpoisson(lambda_2, 80 - tau)]
    plt.bar(np.arange(80), data, color="#348ABD")
    plt.bar(tau - 1, data[tau - 1], color="r", label="user behaviour changed")
    plt.xlim(0, 80)
figsize(12.5, 5)
plt.suptitle("More examples of artificial datasets", fontsize=14)
# Four independent simulations, stacked vertically.
for i in range(1, 5):
    plt.subplot(4, 1, i)
    plot_artificial_sms_dataset()
"""
Explanation: It is okay that our fictional dataset does not look like our observed dataset: the probability is incredibly small it indeed would. PyMC's engine is designed to find good parameters, $\lambda_i, \tau$, that maximize this probability.
The ability to generate artificial datasets is an interesting side effect of our modeling, and we will see that this ability is a very important method of Bayesian inference. We produce a few more datasets below:
End of explanation
"""
import pymc as pm
# Uninformative Uniform(0, 1) prior on the conversion probability p.
# The parameters are the bounds of the Uniform.
p = pm.Uniform('p', lower=0, upper=1)
"""
Explanation: Later we will see how we use this to make predictions and test the appropriateness of our models.
Example: Bayesian A/B testing
A/B testing is a statistical design pattern for determining the difference of effectiveness between two different treatments. For example, a pharmaceutical company is interested in the effectiveness of drug A vs drug B. The company will test drug A on some fraction of their trials, and drug B on the other fraction (this fraction is often 1/2, but we will relax this assumption). After performing enough trials, the in-house statisticians sift through the data to determine which drug yielded better results.
Similarly, front-end web developers are interested in which design of their website yields more sales or some other metric of interest. They will route some fraction of visitors to site A, and the other fraction to site B, and record if the visit yielded a sale or not. The data is recorded (in real-time), and analyzed afterwards.
Often, the post-experiment analysis is done using something called a hypothesis test like difference of means test or difference of proportions test. This involves often misunderstood quantities like a "Z-score" and even more confusing "p-values" (please don't ask). If you have taken a statistics course, you have probably been taught this technique (though not necessarily learned this technique). And if you were like me, you may have felt uncomfortable with their derivation -- good: the Bayesian approach to this problem is much more natural.
A Simple Case
As this is a hacker book, we'll continue with the web-dev example. For the moment, we will focus on the analysis of site A only. Assume that there is some true $0 \lt p_A \lt 1$ probability that users who, upon shown site A, eventually purchase from the site. This is the true effectiveness of site A. Currently, this quantity is unknown to us.
Suppose site A was shown to $N$ people, and $n$ people purchased from the site. One might conclude hastily that $p_A = \frac{n}{N}$. Unfortunately, the observed frequency $\frac{n}{N}$ does not necessarily equal $p_A$ -- there is a difference between the observed frequency and the true frequency of an event. The true frequency can be interpreted as the probability of an event occurring. For example, the true frequency of rolling a 1 on a 6-sided die is $\frac{1}{6}$. Knowing the true frequency of events like:
fraction of users who make purchases,
frequency of social attributes,
percent of internet users with cats etc.
are common requests we ask of Nature. Unfortunately, often Nature hides the true frequency from us and we must infer it from observed data.
The observed frequency is then the frequency we observe: say rolling the die 100 times you may observe 20 rolls of 1. The observed frequency, 0.2, differs from the true frequency, $\frac{1}{6}$. We can use Bayesian statistics to infer probable values of the true frequency using an appropriate prior and observed data.
With respect to our A/B example, we are interested in using what we know, $N$ (the total trials administered) and $n$ (the number of conversions), to estimate what $p_A$, the true frequency of buyers, might be.
To set up a Bayesian model, we need to assign prior distributions to our unknown quantities. A priori, what do we think $p_A$ might be? For this example, we have no strong conviction about $p_A$, so for now, let's assume $p_A$ is uniform over [0,1]:
End of explanation
"""
# set constants
p_true = 0.05  # remember, this is unknown.
N = 1500
# sample N Bernoulli random variables from Ber(0.05).
# each random variable has a 0.05 chance of being a 1.
# this is the data-generation step
occurrences = pm.rbernoulli(p_true, N)
print(occurrences)  # Remember: Python treats True == 1, and False == 0
print(occurrences.sum())
"""
Explanation: Had we had stronger beliefs, we could have expressed them in the prior above.
For this example, consider $p_A = 0.05$, and $N = 1500$ users shown site A, and we will simulate whether the user made a purchase or not. To simulate this from $N$ trials, we will use a Bernoulli distribution: if $X\ \sim \text{Ber}(p)$, then $X$ is 1 with probability $p$ and 0 with probability $1 - p$. Of course, in practice we do not know $p_A$, but we will use it here to simulate the data.
End of explanation
"""
# Occurrences.mean is equal to n/N.
# The sample mean almost never matches p_true exactly -- that's the point.
print("What is the observed frequency in Group A? %.4f" % occurrences.mean())
print("Does this equal the true frequency? %s" % (occurrences.mean() == p_true))
"""
Explanation: The observed frequency is:
End of explanation
"""
# include the observations, which are Bernoulli
obs = pm.Bernoulli("obs", p, value=occurrences, observed=True)
# To be explained in chapter 3
# 18000 MCMC steps; the first 1000 are discarded as burn-in.
mcmc = pm.MCMC([p, obs])
mcmc.sample(18000, 1000)
"""
Explanation: We combine the observations into the PyMC observed variable, and run our inference algorithm:
End of explanation
"""
figsize(12.5, 4)
plt.title("Posterior distribution of $p_A$, the true effectiveness of site A")
plt.vlines(p_true, 0, 90, linestyle="--", label="true $p_A$ (unknown)")
# NOTE(review): `normed` was removed in matplotlib >= 3.1 (use density=True).
plt.hist(mcmc.trace("p")[:], bins=25, histtype="stepfilled", normed=True)
plt.legend();
"""
Explanation: We plot the posterior distribution of the unknown $p_A$ below:
End of explanation
"""
import pymc as pm
figsize(12, 4)
# these two quantities are unknown to us.
true_p_A = 0.05
true_p_B = 0.04
# notice the unequal sample sizes -- no problem in Bayesian analysis.
N_A = 1500
N_B = 750
# generate some observations
observations_A = pm.rbernoulli(true_p_A, N_A)
observations_B = pm.rbernoulli(true_p_B, N_B)
print("Obs from Site A: ", observations_A[:30].astype(int), "...")
print("Obs from Site B: ", observations_B[:30].astype(int), "...")
print(observations_A.mean())
print(observations_B.mean())
# Set up the pymc model. Again assume Uniform priors for p_A and p_B.
p_A = pm.Uniform("p_A", 0, 1)
p_B = pm.Uniform("p_B", 0, 1)
# Define the deterministic delta function. This is our unknown of interest.
@pm.deterministic
def delta(p_A=p_A, p_B=p_B):
    """Difference in conversion probabilities; posterior mass above 0 favours A."""
    return p_A - p_B
# Set of observations, in this case we have two observation datasets.
obs_A = pm.Bernoulli("obs_A", p_A, value=observations_A, observed=True)
obs_B = pm.Bernoulli("obs_B", p_B, value=observations_B, observed=True)
# To be explained in chapter 3.
# 20000 steps with a 1000-step burn-in.
mcmc = pm.MCMC([p_A, p_B, delta, obs_A, obs_B])
mcmc.sample(20000, 1000)
"""
Explanation: Our posterior distribution puts most weight near the true value of $p_A$, but also some weights in the tails. This is a measure of how uncertain we should be, given our observations. Try changing the number of observations, N, and observe how the posterior distribution changes.
A and B Together
A similar analysis can be done for site B's response data to determine the analogous $p_B$. But what we are really interested in is the difference between $p_A$ and $p_B$. Let's infer $p_A$, $p_B$, and $\text{delta} = p_A - p_B$, all at once. We can do this using PyMC's deterministic variables. (We'll assume for this exercise that $p_B = 0.04$, so $\text{delta} = 0.01$, $N_B = 750$ (significantly less than $N_A$) and we will simulate site B's data like we did for site A's data )
End of explanation
"""
# Pull the post-burn-in traces for the three unknowns.
p_A_samples = mcmc.trace("p_A")[:]
p_B_samples = mcmc.trace("p_B")[:]
delta_samples = mcmc.trace("delta")[:]
figsize(12.5, 10)
# histogram of posteriors
ax = plt.subplot(311)
plt.xlim(0, .1)
plt.hist(p_A_samples, histtype='stepfilled', bins=25, alpha=0.85,
         label="posterior of $p_A$", color="#A60628", normed=True)
plt.vlines(true_p_A, 0, 80, linestyle="--", label="true $p_A$ (unknown)")
plt.legend(loc="upper right")
plt.title("Posterior distributions of $p_A$, $p_B$, and delta unknowns")
ax = plt.subplot(312)
plt.xlim(0, .1)
plt.hist(p_B_samples, histtype='stepfilled', bins=25, alpha=0.85,
         label="posterior of $p_B$", color="#467821", normed=True)
plt.vlines(true_p_B, 0, 80, linestyle="--", label="true $p_B$ (unknown)")
plt.legend(loc="upper right")
ax = plt.subplot(313)
# delta = p_A - p_B; mass above zero means site A likely converts better.
plt.hist(delta_samples, histtype='stepfilled', bins=30, alpha=0.85,
         label="posterior of delta", color="#7A68A6", normed=True)
plt.vlines(true_p_A - true_p_B, 0, 60, linestyle="--",
           label="true delta (unknown)")
plt.vlines(0, 0, 60, color="black", alpha=0.2)
plt.legend(loc="upper right");
"""
Explanation: Below we plot the posterior distributions for the three unknowns:
End of explanation
"""
# The fraction of posterior delta samples below zero is the Monte Carlo
# estimate of P(p_A < p_B), i.e. the probability site A is worse; the
# fraction above zero estimates the complementary claim.
prob_A_worse = (delta_samples < 0).mean()
prob_A_better = (delta_samples > 0).mean()
print("Probability site A is WORSE than site B: %.3f" % prob_A_worse)
print("Probability site A is BETTER than site B: %.3f" % prob_A_better)
"""
Explanation: Notice that as a result of N_B < N_A, i.e. we have less data from site B, our posterior distribution of $p_B$ is fatter, implying we are less certain about the true value of $p_B$ than we are of $p_A$.
With respect to the posterior distribution of $\text{delta}$, we can see that the majority of the distribution is above $\text{delta}=0$, implying there site A's response is likely better than site B's response. The probability this inference is incorrect is easily computable:
End of explanation
"""
figsize(12.5, 4)
import scipy.stats as stats
binomial = stats.binom
# Two (N, p) parameter pairs, each drawn with its own color.
parameters = [(10, .4), (10, .9)]
colors = ["#348ABD", "#A60628"]
for (N, p), shade in zip(parameters, colors):
    support = np.arange(N + 1)
    # Center each bar on its integer k by shifting left half a unit.
    plt.bar(support - 0.5, binomial.pmf(support, N, p), color=shade,
            edgecolor=shade,
            alpha=0.6,
            label="$N$: %d, $p$: %.1f" % (N, p),
            linewidth=3)
plt.legend(loc="upper left")
plt.xlim(0, 10.5)
plt.xlabel("$k$")
plt.ylabel("$P(X = k)$")
plt.title("Probability mass distributions of binomial random variables");
"""
Explanation: If this probability is too high for comfortable decision-making, we can perform more trials on site B (as site B has less samples to begin with, each additional data point for site B contributes more inferential "power" than each additional data point for site A).
Try playing with the parameters true_p_A, true_p_B, N_A, and N_B, to see what the posterior of $\text{delta}$ looks like. Notice in all this, the difference in sample sizes between site A and site B was never mentioned: it naturally fits into Bayesian analysis.
I hope the readers feel this style of A/B testing is more natural than hypothesis testing, which has probably confused more than helped practitioners. Later in this book, we will see two extensions of this model: the first to help dynamically adjust for bad sites, and the second will improve the speed of this computation by reducing the analysis to a single equation.
An algorithm for human deceit
Social data has an additional layer of interest as people are not always honest with responses, which adds a further complication into inference. For example, simply asking individuals "Have you ever cheated on a test?" will surely contain some rate of dishonesty. What you can say for certain is that the true rate is less than your observed rate (assuming individuals lie only about not cheating; I cannot imagine one who would admit "Yes" to cheating when in fact they hadn't cheated).
To present an elegant solution to circumventing this dishonesty problem, and to demonstrate Bayesian modeling, we first need to introduce the binomial distribution.
The Binomial Distribution
The binomial distribution is one of the most popular distributions, mostly because of its simplicity and usefulness. Unlike the other distributions we have encountered thus far in the book, the binomial distribution has 2 parameters: $N$, a positive integer representing $N$ trials or number of instances of potential events, and $p$, the probability of an event occurring in a single trial. Like the Poisson distribution, it is a discrete distribution, but unlike the Poisson distribution, it only weighs integers from $0$ to $N$. The mass distribution looks like:
$$P( X = k ) = {{N}\choose{k}} p^k(1-p)^{N-k}$$
If $X$ is a binomial random variable with parameters $p$ and $N$, denoted $X \sim \text{Bin}(N,p)$, then $X$ is the number of events that occurred in the $N$ trials (obviously $0 \le X \le N$), and $p$ is the probability of a single event. The larger $p$ is (while still remaining between 0 and 1), the more events are likely to occur. The expected value of a binomial is equal to $Np$. Below we plot the mass probability distribution for varying parameters.
End of explanation
"""
import pymc as pm
# N students surveyed; p is the unknown true proportion of cheaters,
# given an uninformative Uniform(0, 1) prior.
N = 100
p = pm.Uniform("freq_cheating", 0, 1)
"""
Explanation: The special case when $N = 1$ corresponds to the Bernoulli distribution. There is another connection between Bernoulli and Binomial random variables. If we have $X_1, X_2, ... , X_N$ Bernoulli random variables with the same $p$, then $Z = X_1 + X_2 + ... + X_N \sim \text{Binomial}(N, p )$.
The expected value of a Bernoulli random variable is $p$. This can be seen by noting the more general Binomial random variable has expected value $Np$ and setting $N=1$.
Example: Cheating among students
We will use the binomial distribution to determine the frequency of students cheating during an exam. If we let $N$ be the total number of students who took the exam, and assuming each student is interviewed post-exam (answering without consequence), we will receive integer $X$ "Yes I did cheat" answers. We then find the posterior distribution of $p$, given $N$, some specified prior on $p$, and observed data $X$.
This is a completely absurd model. No student, even with a free-pass against punishment, would admit to cheating. What we need is a better algorithm to ask students if they had cheated. Ideally the algorithm should encourage individuals to be honest while preserving privacy. The following proposed algorithm is a solution I greatly admire for its ingenuity and effectiveness:
In the interview process for each student, the student flips a coin, hidden from the interviewer. The student agrees to answer honestly if the coin comes up heads. Otherwise, if the coin comes up tails, the student (secretly) flips the coin again, and answers "Yes, I did cheat" if the coin flip lands heads, and "No, I did not cheat", if the coin flip lands tails. This way, the interviewer does not know if a "Yes" was the result of a guilty plea, or a Heads on a second coin toss. Thus privacy is preserved and the researchers receive honest answers.
I call this the Privacy Algorithm. One could of course argue that the interviewers are still receiving false data since some Yes's are not confessions but instead randomness, but an alternative perspective is that the researchers are discarding approximately half of their original dataset since half of the responses will be noise. But they have gained a systematic data generation process that can be modeled. Furthermore, they do not have to incorporate (perhaps somewhat naively) the possibility of deceitful answers. We can use PyMC to dig through this noisy model, and find a posterior distribution for the true frequency of liars.
Suppose 100 students are being surveyed for cheating, and we wish to find $p$, the proportion of cheaters. There are a few ways we can model this in PyMC. I'll demonstrate the most explicit way, and later show a simplified version. Both versions arrive at the same inference. In our data-generation model, we sample $p$, the true proportion of cheaters, from a prior. Since we are quite ignorant about $p$, we will assign it a $\text{Uniform}(0,1)$ prior.
End of explanation
"""
# One latent Bernoulli(p) per student: 1 = cheated, 0 = did not.
true_answers = pm.Bernoulli("truths", p, size=N)
"""
Explanation: Again, thinking of our data-generation model, we assign Bernoulli random variables to the 100 students: 1 implies they cheated and 0 implies they did not.
End of explanation
"""
# Each student's first (fair) coin flip: 1 = heads, 0 = tails.
first_coin_flips = pm.Bernoulli("first_flips", 0.5, size=N)
print(first_coin_flips.value)
"""
Explanation: If we carry out the algorithm, the next step that occurs is the first coin-flip each student makes. This can be modeled again by sampling 100 Bernoulli random variables with $p=1/2$: denote a 1 as a Heads and 0 a Tails.
End of explanation
"""
# Potential second flip for every student (only used when the first was tails).
second_coin_flips = pm.Bernoulli("second_flips", 0.5, size=N)
"""
Explanation: Although not everyone flips a second time, we can still model the possible realization of second coin-flips:
End of explanation
"""
@pm.deterministic
def observed_proportion(t_a=true_answers,
                        fc=first_coin_flips,
                        sc=second_coin_flips):
    """Proportion of "Yes" responses implied by the privacy algorithm."""
    # Response is the truthful answer when the first flip is heads (fc == 1),
    # otherwise it is the outcome of the second flip.
    observed = fc * t_a + (1 - fc) * sc
    return observed.sum() / float(N)
"""
Explanation: Using these variables, we can return a possible realization of the observed proportion of "Yes" responses. We do this using a PyMC deterministic variable:
End of explanation
"""
# Peek at one realization of the deterministic proportion.
observed_proportion.value
"""
Explanation: The line fc*t_a + (1-fc)*sc contains the heart of the Privacy algorithm. Elements in this array are 1 if and only if i) the first toss is heads and the student cheated or ii) the first toss is tails, and the second is heads, and are 0 otherwise. Finally, the last line sums this vector and divides by float(N), producing a proportion.
End of explanation
"""
# The survey yielded 35 "Yes" responses out of N = 100 students.
X = 35
observations = pm.Binomial("obs", N, observed_proportion, observed=True,
                           value=X)
"""
Explanation: Next we need a dataset. After performing our coin-flipped interviews the researchers received 35 "Yes" responses. To put this into a relative perspective, if there truly were no cheaters, we should expect to see on average 1/4 of all responses being a "Yes" (half chance of having first coin land Tails, and another half chance of having second coin land Heads), so about 25 responses in a cheat-free world. On the other hand, if all students cheated, we should expect to see approximately 3/4 of all responses be "Yes".
The researchers observe a Binomial random variable, with N = 100 and p = observed_proportion with value = 35:
End of explanation
"""
# Collect every stochastic/deterministic variable into one model container.
model = pm.Model([p, true_answers, first_coin_flips,
                  second_coin_flips, observed_proportion, observations])
# To be explained in Chapter 3!
# 40000 samples, first 15000 discarded as burn-in.
mcmc = pm.MCMC(model)
mcmc.sample(40000, 15000)
figsize(12.5, 3)
p_trace = mcmc.trace("freq_cheating")[:]
plt.hist(p_trace, histtype="stepfilled", normed=True, alpha=0.85, bins=30,
         label="posterior distribution", color="#348ABD")
# Vertical guides at 0.05 and 0.35 bracket where most posterior mass lies.
plt.vlines([.05, .35], [0, 0], [5, 5], alpha=0.3)
plt.xlim(0, 1)
plt.legend();
"""
Explanation: Below we add all the variables of interest to a Model container and run our black-box algorithm over the model.
End of explanation
"""
# Fresh Uniform(0, 1) prior on the true cheating frequency for the
# simplified model.
p = pm.Uniform("freq_cheating", 0, 1)

@pm.deterministic
def p_skewed(p=p):
    """Probability of a "Yes" under the privacy algorithm: p/2 + 1/4."""
    # Half the students answer truthfully (contributing p/2); the other
    # half answer "Yes" with probability 1/2 (contributing 1/4).
    truthful_part = p * 0.5
    return truthful_part + 0.25
"""
Explanation: With regards to the above plot, we are still pretty uncertain about what the true frequency of cheaters might be, but we have narrowed it down to a range between 0.05 to 0.35 (marked by the solid lines). This is pretty good, as a priori we had no idea how many students might have cheated (hence the uniform distribution for our prior). On the other hand, it is also pretty bad since there is a .3 length window the true value most likely lives in. Have we even gained anything, or are we still too uncertain about the true frequency?
I would argue, yes, we have discovered something. It is implausible, according to our posterior, that there are no cheaters, i.e. the posterior assigns low probability to $p=0$. Since we started with a uniform prior, treating all values of $p$ as equally plausible, but the data ruled out $p=0$ as a possibility, we can be confident that there were cheaters.
This kind of algorithm can be used to gather private information from users and be reasonably confident that the data, though noisy, is truthful.
Alternative PyMC Model
Given a value for $p$ (which from our god-like position we know), we can find the probability the student will answer yes:
\begin{align}
P(\text{"Yes"}) &= P( \text{Heads on first coin} )P( \text{cheater} ) + P( \text{Tails on first coin} )P( \text{Heads on second coin} ) \\
& = \frac{1}{2}p + \frac{1}{2}\frac{1}{2}\\
& = \frac{p}{2} + \frac{1}{4}
\end{align}
Thus, knowing $p$ we know the probability a student will respond "Yes". In PyMC, we can create a deterministic function to evaluate the probability of responding "Yes", given $p$:
End of explanation
"""
# Observed count of "Yes" answers: Binomial(100, p_skewed) pinned at 35.
yes_responses = pm.Binomial("number_cheaters", 100, p_skewed,
                            value=35, observed=True)
"""
Explanation: I could have typed p_skewed = 0.5*p + 0.25 instead for a one-liner, as the elementary operations of addition and scalar multiplication will implicitly create a deterministic variable, but I wanted to make the deterministic boilerplate explicit for clarity's sake.
If we know the probability of respondents saying "Yes", which is p_skewed, and we have $N=100$ students, the number of "Yes" responses is a binomial random variable with parameters N and p_skewed.
This is where we include our observed 35 "Yes" responses. In the declaration of the pm.Binomial, we include value = 35 and observed = True.
End of explanation
"""
# The simplified model needs only three variables.
model = pm.Model([yes_responses, p_skewed, p])
# To Be Explained in Chapter 3!
# 25000 samples, first 2500 discarded as burn-in.
mcmc = pm.MCMC(model)
mcmc.sample(25000, 2500)
figsize(12.5, 3)
p_trace = mcmc.trace("freq_cheating")[:]
plt.hist(p_trace, histtype="stepfilled", normed=True, alpha=0.85, bins=30,
         label="posterior distribution", color="#348ABD")
# Same 0.05 / 0.35 guides as the explicit model, for comparison.
plt.vlines([.05, .35], [0, 0], [5, 5], alpha=0.2)
plt.xlim(0, 1)
plt.legend();
"""
Explanation: Below we add all the variables of interest to a Model container and run our black-box algorithm over the model.
End of explanation
"""
# A Numpy array with dtype=object can hold heterogeneous PyMC variables.
N = 10
x = np.empty(N, dtype=object)
for idx in range(N):
    # Exponential rate grows as (idx + 1)^2, one variable per slot.
    x[idx] = pm.Exponential('x_%i' % idx, (idx + 1) ** 2)
"""
Explanation: More PyMC Tricks
Protip: Lighter deterministic variables with Lambda class
Sometimes writing a deterministic function using the @pm.deterministic decorator can seem like a chore, especially for a small function. I have already mentioned that elementary math operations can produce deterministic variables implicitly, but what about operations like indexing or slicing? Built-in Lambda functions can handle this with the elegance and simplicity required. For example,
beta = pm.Normal("coefficients", 0, size=(N, 1))
x = np.random.randn((N, 1))
linear_combination = pm.Lambda(lambda x=x, beta=beta: np.dot(x.T, beta))
Protip: Arrays of PyMC variables
There is no reason why we cannot store multiple heterogeneous PyMC variables in a Numpy array. Just remember to set the dtype of the array to object upon initialization. For example:
End of explanation
"""
figsize(12.5, 3.5)
np.set_printoptions(precision=3, suppress=True)
# Columns 1 and 2 of the CSV: outside temperature (F) and damage indicator.
challenger_data = np.genfromtxt("data/challenger_data.csv", skip_header=1,
                                usecols=[1, 2], missing_values="NA",
                                delimiter=",")
# drop the NA values
challenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]
# plot it, as a function of temperature (the first column)
print("Temp (F), O-Ring failure?")
print(challenger_data)
plt.scatter(challenger_data[:, 0], challenger_data[:, 1], s=75, color="k",
            alpha=0.5)
plt.yticks([0, 1])
plt.ylabel("Damage Incident?")
plt.xlabel("Outside temperature (Fahrenheit)")
plt.title("Defects of the Space Shuttle O-Rings vs temperature");
"""
Explanation: The remainder of this chapter examines some practical examples of PyMC and PyMC modeling:
Example: Challenger Space Shuttle Disaster <span id="challenger"/>
On January 28, 1986, the twenty-fifth flight of the U.S. space shuttle program ended in disaster when one of the rocket boosters of the Shuttle Challenger exploded shortly after lift-off, killing all seven crew members. The presidential commission on the accident concluded that it was caused by the failure of an O-ring in a field joint on the rocket booster, and that this failure was due to a faulty design that made the O-ring unacceptably sensitive to a number of factors including outside temperature. Of the previous 24 flights, data were available on failures of O-rings on 23, (one was lost at sea), and these data were discussed on the evening preceding the Challenger launch, but unfortunately only the data corresponding to the 7 flights on which there was a damage incident were considered important and these were thought to show no obvious trend. The data are shown below (see [1]):
End of explanation
"""
# Shrink the figure for the upcoming logistic-curve plot.
figsize(12, 3)
def logistic(x, beta):
    """Logistic curve 1 / (1 + e^(beta*x)); decreasing in x for beta > 0."""
    denominator = 1.0 + np.exp(beta * x)
    return 1.0 / denominator
# Plot the logistic curve for three values of beta; negative beta flips
# the curve so it increases with x.
x = np.linspace(-4, 4, 100)
plt.plot(x, logistic(x, 1), label=r"$\beta = 1$")
plt.plot(x, logistic(x, 3), label=r"$\beta = 3$")
plt.plot(x, logistic(x, -5), label=r"$\beta = -5$")
# Fixed title typos: "functon" -> "function", "several value" -> "several values".
plt.title("Logistic function plotted for several values of $\\beta$ parameter", fontsize=14)
plt.legend();
"""
Explanation: It looks clear that the probability of damage incidents occurring increases as the outside temperature decreases. We are interested in modeling the probability here because it does not look like there is a strict cutoff point between temperature and a damage incident occurring. The best we can do is ask "At temperature $t$, what is the probability of a damage incident?". The goal of this example is to answer that question.
We need a function of temperature, call it $p(t)$, that is bounded between 0 and 1 (so as to model a probability) and changes from 1 to 0 as we increase temperature. There are actually many such functions, but the most popular choice is the logistic function.
$$p(t) = \frac{1}{ 1 + e^{ \;\beta t } } $$
In this model, $\beta$ is the variable we are uncertain about. Below is the function plotted for $\beta = 1, 3, -5$.
End of explanation
"""
def logistic(x, beta, alpha=0):
    """Biased logistic curve 1 / (1 + e^(beta.x + alpha)).

    The bias alpha shifts the curve horizontally; np.dot lets beta be a
    column of posterior samples applied against a row of inputs.
    """
    linear_term = np.dot(beta, x) + alpha
    return 1.0 / (1.0 + np.exp(linear_term))
# Overlay the unbiased curves (dashed) with biased counterparts (solid)
# to show how alpha shifts each curve left or right.
x = np.linspace(-4, 4, 100)
plt.plot(x, logistic(x, 1), label=r"$\beta = 1$", ls="--", lw=1)
plt.plot(x, logistic(x, 3), label=r"$\beta = 3$", ls="--", lw=1)
plt.plot(x, logistic(x, -5), label=r"$\beta = -5$", ls="--", lw=1)
plt.plot(x, logistic(x, 1, 1), label=r"$\beta = 1, \alpha = 1$",
         color="#348ABD")
plt.plot(x, logistic(x, 3, -2), label=r"$\beta = 3, \alpha = -2$",
         color="#A60628")
plt.plot(x, logistic(x, -5, 7), label=r"$\beta = -5, \alpha = 7$",
         color="#7A68A6")
# Fixed title typos: "functon" -> "function", "several value" -> "several values".
plt.title("Logistic function with bias, plotted for several values of $\\alpha$ bias parameter", fontsize=14)
plt.legend(loc="lower left");
"""
Explanation: But something is missing. In the plot of the logistic function, the probability changes only near zero, but in our data above the probability changes around 65 to 70. We need to add a bias term to our logistic function:
$$p(t) = \frac{1}{ 1 + e^{ \;\beta t + \alpha } } $$
Some plots are below, with differing $\alpha$.
End of explanation
"""
import scipy.stats as stats
nor = stats.norm
x = np.linspace(-8, 7, 150)
# Three (mean, precision) pairs; precision tau = 1/variance, so
# scale (std dev) = 1/sqrt(tau).
mu = (-2, 0, 3)
tau = (.7, 1, 2.8)
colors = ["#348ABD", "#A60628", "#7A68A6"]
parameters = zip(mu, tau, colors)
for _mu, _tau, _color in parameters:
    plt.plot(x, nor.pdf(x, _mu, scale=1. / np.sqrt(_tau)),
             label="$\mu = %d,\;\\tau = %.1f$" % (_mu, _tau), color=_color)
    plt.fill_between(x, nor.pdf(x, _mu, scale=1. / np.sqrt(_tau)), color=_color,
                     alpha=.33)
plt.legend(loc="upper right")
plt.xlabel("$x$")
plt.ylabel("density function at $x$")
plt.title("Probability distribution of three different Normal random \
variables");
"""
Explanation: Adding a constant term $\alpha$ amounts to shifting the curve left or right (hence why it is called a bias).
Let's start modeling this in PyMC. The $\beta, \alpha$ parameters have no reason to be positive, bounded or relatively large, so they are best modeled by a Normal random variable, introduced next.
Normal distributions
A Normal random variable, denoted $X \sim N(\mu, 1/\tau)$, has a distribution with two parameters: the mean, $\mu$, and the precision, $\tau$. Those familiar with the Normal distribution already have probably seen $\sigma^2$ instead of $\tau^{-1}$. They are in fact reciprocals of each other. The change was motivated by simpler mathematical analysis and is an artifact of older Bayesian methods. Just remember: the smaller $\tau$, the larger the spread of the distribution (i.e. we are more uncertain); the larger $\tau$, the tighter the distribution (i.e. we are more certain). Regardless, $\tau$ is always positive.
The probability density function of a $N( \mu, 1/\tau)$ random variable is:
$$ f(x | \mu, \tau) = \sqrt{\frac{\tau}{2\pi}} \exp\left( -\frac{\tau}{2} (x-\mu)^2 \right) $$
We plot some different density functions below.
End of explanation
"""
import pymc as pm
temperature = challenger_data[:, 0]
D = challenger_data[:, 1]  # defect or not?
# notice the`value` here. We explain why below.
# Vague Normal(0, precision=0.001) priors; value=0 gives p a sane start
# so pm.Bernoulli never sees an exact 0 or 1 probability.
beta = pm.Normal("beta", 0, 0.001, value=0)
alpha = pm.Normal("alpha", 0, 0.001, value=0)
@pm.deterministic
def p(t=temperature, alpha=alpha, beta=beta):
    # Defect probability at each observed temperature via the biased logistic.
    return 1.0 / (1. + np.exp(beta * t + alpha))
"""
Explanation: A Normal random variable can take on any real number, but the variable is very likely to be relatively close to $\mu$. In fact, the expected value of a Normal is equal to its $\mu$ parameter:
$$ E[ X | \mu, \tau] = \mu$$
and its variance is equal to the inverse of $\tau$:
$$Var( X | \mu, \tau ) = \frac{1}{\tau}$$
Below we continue our modeling of the Challenger space craft:
End of explanation
"""
# Inspect the starting defect probabilities (all 0.5, since beta=alpha=0).
p.value
# connect the probabilities in `p` with our observations through a
# Bernoulli random variable.
observed = pm.Bernoulli("bernoulli_obs", p, value=D, observed=True)
model = pm.Model([observed, beta, alpha])
# Mysterious code to be explained in Chapter 3
# MAP fit seeds the chain near the posterior mode before sampling.
map_ = pm.MAP(model)
map_.fit()
# 120000 samples, 100000 burn-in, thinning every 2nd sample.
mcmc = pm.MCMC(model)
mcmc.sample(120000, 100000, 2)
"""
Explanation: We have our probabilities, but how do we connect them to our observed data? A Bernoulli random variable with parameter $p$, denoted $\text{Ber}(p)$, is a random variable that takes value 1 with probability $p$, and 0 else. Thus, our model can look like:
$$ \text{Defect Incident, $D_i$} \sim \text{Ber}( \;p(t_i)\; ), \;\; i=1..N$$
where $p(t)$ is our logistic function and $t_i$ are the temperatures we have observations about. Notice in the above code we had to set the values of beta and alpha to 0. The reason for this is that if beta and alpha are very large, they make p equal to 1 or 0. Unfortunately, pm.Bernoulli does not like probabilities of exactly 0 or 1, though they are mathematically well-defined probabilities. So by setting the coefficient values to 0, we set the variable p to be a reasonable starting value. This has no effect on our results, nor does it mean we are including any additional information in our prior. It is simply a computational caveat in PyMC.
End of explanation
"""
# [:, None] adds a trailing axis so the samples broadcast against the
# temperature grid later on.
alpha_samples = mcmc.trace('alpha')[:, None]  # best to make them 1d
beta_samples = mcmc.trace('beta')[:, None]
figsize(12.5, 6)
# histogram of the samples:
plt.subplot(211)
plt.title(r"Posterior distributions of the variables $\alpha, \beta$")
plt.hist(beta_samples, histtype='stepfilled', bins=35, alpha=0.85,
         label=r"posterior of $\beta$", color="#7A68A6", normed=True)
plt.legend()
plt.subplot(212)
plt.hist(alpha_samples, histtype='stepfilled', bins=35, alpha=0.85,
         label=r"posterior of $\alpha$", color="#A60628", normed=True)
plt.legend();
"""
Explanation: We have trained our model on the observed data, now we can sample values from the posterior. Let's look at the posterior distributions for $\alpha$ and $\beta$:
End of explanation
"""
# Temperature grid padded 5 degrees beyond the data; p_t holds one defect
# curve per posterior (beta, alpha) sample, averaged column-wise below.
t = np.linspace(temperature.min() - 5, temperature.max() + 5, 50)[:, None]
p_t = logistic(t.T, beta_samples, alpha_samples)
mean_prob_t = p_t.mean(axis=0)
figsize(12.5, 4)
plt.plot(t, mean_prob_t, lw=3, label="average posterior \nprobability \
of defect")
# Two individual posterior draws, shown as equally-plausible realizations.
plt.plot(t, p_t[0, :], ls="--", label="realization from posterior")
plt.plot(t, p_t[-2, :], ls="--", label="realization from posterior")
plt.scatter(temperature, D, color="k", s=50, alpha=0.5)
plt.title("Posterior expected value of probability of defect; \
plus realizations")
plt.legend(loc="lower left")
plt.ylim(-0.1, 1.1)
plt.xlim(t.min(), t.max())
plt.ylabel("probability")
plt.xlabel("temperature");
"""
Explanation: All samples of $\beta$ are greater than 0. If instead the posterior was centered around 0, we may suspect that $\beta = 0$, implying that temperature has no effect on the probability of defect.
Similarly, all $\alpha$ posterior values are negative and far away from 0, implying that it is correct to believe that $\alpha$ is significantly less than 0.
Regarding the spread of the data, we are very uncertain about what the true parameters might be (though considering the low sample size and the large overlap of defects-to-nondefects this behaviour is perhaps expected).
Next, let's look at the expected probability for a specific value of the temperature. That is, we average over all samples from the posterior to get a likely value for $p(t_i)$.
End of explanation
"""
from scipy.stats.mstats import mquantiles
# vectorized bottom and top 2.5% quantiles for "confidence interval"
# qs[0]/qs[1] are the 2.5% and 97.5% posterior quantiles per temperature.
qs = mquantiles(p_t, [0.025, 0.975], axis=0)
plt.fill_between(t[:, 0], *qs, alpha=0.7,
                 color="#7A68A6")
plt.plot(t[:, 0], qs[0], label="95% CI", color="#7A68A6", alpha=0.7)
plt.plot(t, mean_prob_t, lw=1, ls="--", color="k",
         label="average posterior \nprobability of defect")
plt.xlim(t.min(), t.max())
plt.ylim(-0.02, 1.02)
plt.legend(loc="lower left")
plt.scatter(temperature, D, color="k", s=50, alpha=0.5)
plt.xlabel("temp, $t$")
plt.ylabel("probability estimate")
plt.title("Posterior probability estimates given temp. $t$");
"""
Explanation: Above we also plotted two possible realizations of what the actual underlying system might be. Both are equally likely as any other draw. The blue line is what occurs when we average all the 20000 possible dotted lines together.
An interesting question to ask is for what temperatures are we most uncertain about the defect-probability? Below we plot the expected value line and the associated 95% intervals for each temperature.
End of explanation
"""
figsize(12.5, 2.5)
# Push every posterior (beta, alpha) sample through the logistic at t = 31F,
# the launch-day temperature.
prob_31 = logistic(31, beta_samples, alpha_samples)
plt.xlim(0.995, 1)
plt.hist(prob_31, bins=1000, normed=True, histtype='stepfilled')
plt.title("Posterior distribution of probability of defect, given $t = 31$")
plt.xlabel("probability of defect occurring in O-ring");
"""
Explanation: The 95% credible interval, or 95% CI, painted in purple, represents the interval, for each temperature, that contains 95% of the distribution. For example, at 65 degrees, we can be 95% sure that the probability of defect lies between 0.25 and 0.75.
More generally, we can see that as the temperature nears 60 degrees, the CI's spread out over [0,1] quickly. As we pass 70 degrees, the CI's tighten again. This can give us insight about how to proceed next: we should probably test more O-rings around 60-65 temperature to get a better estimate of probabilities in that range. Similarly, when reporting to scientists your estimates, you should be very cautious about simply telling them the expected probability, as we can see this does not reflect how wide the posterior distribution is.
What about the day of the Challenger disaster?
On the day of the Challenger disaster, the outside temperature was 31 degrees Fahrenheit. What is the posterior distribution of a defect occurring, given this temperature? The distribution is plotted below. It looks almost guaranteed that the Challenger was going to be subject to defective O-rings.
End of explanation
"""
# Same structure as `observed` but without observed=True, so the sampler
# generates artificial datasets from the posterior.
simulated = pm.Bernoulli("bernoulli_sim", p)
N = 10000
mcmc = pm.MCMC([simulated, alpha, beta, observed])
mcmc.sample(N)
figsize(12.5, 5)
simulations = mcmc.trace("bernoulli_sim")[:]
print(simulations.shape)
plt.title("Simulated dataset using posterior parameters")
figsize(12.5, 6)
# Show four simulated datasets, spaced 1000 samples apart in the trace.
for i in range(4):
    ax = plt.subplot(4, 1, i + 1)
    plt.scatter(temperature, simulations[1000 * i, :], color="k",
                s=50, alpha=0.6)
"""
Explanation: Is our model appropriate?
The skeptical reader will say "You deliberately chose the logistic function for $p(t)$ and the specific priors. Perhaps other functions or priors will give different results. How do I know I have chosen a good model?" This is absolutely true. To consider an extreme situation, what if I had chosen the function $p(t) = 1,\; \forall t$, which guarantees a defect always occurring: I would have again predicted disaster on January 28th. Yet this is clearly a poorly chosen model. On the other hand, if I did choose the logistic function for $p(t)$, but specified all my priors to be very tight around 0, likely we would have very different posterior distributions. How do we know our model is an expression of the data? This encourages us to measure the model's goodness of fit.
We can think: how can we test whether our model is a bad fit? An idea is to compare observed data (which if we recall is a fixed stochastic variable) with an artificial dataset which we can simulate. The rationale is that if the simulated dataset does not appear similar, statistically, to the observed dataset, then our model likely does not accurately represent the observed data.
Previously in this Chapter, we simulated artificial datasets for the SMS example. To do this, we sampled values from the priors. We saw how varied the resulting datasets looked like, and rarely did they mimic our observed dataset. In the current example, we should sample from the posterior distributions to create very plausible datasets. Luckily, our Bayesian framework makes this very easy. We only need to create a new Stochastic variable, that is exactly the same as our variable that stored the observations, but minus the observations themselves. If you recall, our Stochastic variable that stored our observed data was:
observed = pm.Bernoulli( "bernoulli_obs", p, value=D, observed=True)
Hence we create:
simulated_data = pm.Bernoulli("simulation_data", p)
Let's simulate 10 000:
End of explanation
"""
# Per-temperature defect probability: the fraction of simulated datasets
# in which that data point came out as a defect.
posterior_probability = simulations.mean(axis=0)
print("posterior prob of defect | realized defect ")
for i in range(len(D)):
    print("%.2f | %d" % (posterior_probability[i], D[i]))
"""
Explanation: Note that the above plots are different (if you can think of a cleaner way to present this, please send a pull request and answer here!).
We wish to assess how good our model is. "Good" is a subjective term of course, so results must be relative to other models.
We will be doing this graphically as well, which may seem like an even less objective method. The alternative is to use Bayesian p-values. These are still subjective, as the proper cutoff between good and bad is arbitrary. Gelman emphasises that the graphical tests are more illuminating [7] than p-value tests. We agree.
The following graphical test is a novel data-viz approach to logistic regression. The plots are called separation plots[8]. For a suite of models we wish to compare, each model is plotted on an individual separation plot. I leave most of the technical details about separation plots to the very accessible original paper, but I'll summarize their use here.
For each model, we calculate the proportion of times the posterior simulation proposed a value of 1 for a particular temperature, i.e. compute $P( \;\text{Defect} = 1 | t, \alpha, \beta )$ by averaging. This gives us the posterior probability of a defect at each data point in our dataset. For example, for the model we used above:
End of explanation
"""
# Sort data points by posterior defect probability so the table reads from
# least- to most-likely predicted defect (the layout a separation plot uses).
ix = np.argsort(posterior_probability)
# Fixed header typo: "probb" -> "prob".
print("prob | defect ")
for i in range(len(D)):
    print("%.2f | %d" % (posterior_probability[ix[i]], D[ix[i]]))
"""
Explanation: Next we sort each column by the posterior probabilities:
End of explanation
"""
from separation_plot import separation_plot
figsize(11., 1.5)
separation_plot(posterior_probability, D)
"""
Explanation: We can present the above data better in a figure: I've wrapped this up into a separation_plot function.
End of explanation
"""
figsize(11., 1.25)

# Our temperature-dependent model
separation_plot(posterior_probability, D)
plt.title("Temperature-dependent model")

# Perfect model
# i.e. the probability of defect is equal to if a defect occurred or not.
p = D
separation_plot(p, D)
plt.title("Perfect model")

# random predictions
# (one random probability per observation; there are 23 observations)
p = np.random.rand(23)
separation_plot(p, D)
plt.title("Random model")

# constant model
# always predicts the observed defect frequency: 7 defects in 23 observations
constant_prob = 7. / 23 * np.ones(23)
separation_plot(constant_prob, D)
plt.title("Constant-prediction model");
"""
Explanation: The snaking-line is the sorted probabilities, blue bars denote defects, and empty space (or grey bars for the optimistic readers) denote non-defects. As the probability rises, we see more and more defects occur. On the right hand side, the plot suggests that as the posterior probability is large (line close to 1), then more defects are realized. This is good behaviour. Ideally, all the blue bars should be close to the right-hand side, and deviations from this reflect missed predictions.
The black vertical line is the expected number of defects we should observe, given this model. This allows the user to see how the total number of events predicted by the model compares to the actual number of events in the data.
It is much more informative to compare this to separation plots for other models. Below we compare our model (top) versus three others:
the perfect model, which predicts the posterior probability to be equal to 1 if a defect did occur.
a completely random model, which predicts random probabilities regardless of temperature.
a constant model: where $P(D = 1 \; | \; t) = c, \;\; \forall t$. The best choice for $c$ is the observed frequency of defects, in this case 7/23.
End of explanation
"""
# type your code here.
figsize(12.5, 4)
plt.scatter(alpha_samples, beta_samples, alpha=0.1)
plt.title("Why does the plot look like this?")
plt.xlabel(r"$\alpha$")
plt.ylabel(r"$\beta$");
"""
Explanation: In the random model, we can see that as the probability increases there is no clustering of defects to the right-hand side. Similarly for the constant model.
In the perfect model, the probability line is not well shown, as it is stuck to the bottom and top of the figure. Of course the perfect model is only for demonstration, and we cannot draw any scientific inference from it.
Exercises
1. Try putting in extreme values for our observations in the cheating example. What happens if we observe 25 affirmative responses? 10? 50?
2. Try plotting $\alpha$ samples versus $\beta$ samples. Why might the resulting plot look like this?
End of explanation
"""
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS file and return it as renderable HTML.

    Uses a context manager so the file handle is always closed (the
    original left the handle open).
    """
    with open("../styles/custom.css", "r") as f:
        styles = f.read()
    return HTML(styles)
css_styling()
"""
Explanation: References
[1] Dalal, Fowlkes and Hoadley (1989),JASA, 84, 945-957.
[2] German Rodriguez. Datasets. In WWS509. Retrieved 30/01/2013, from http://data.princeton.edu/wws509/datasets/#smoking.
[3] McLeish, Don, and Cyntha Struthers. STATISTICS 450/850 Estimation and Hypothesis Testing. Winter 2012. Waterloo, Ontario: 2012. Print.
[4] Fonnesbeck, Christopher. "Building Models." PyMC-Devs. N.p., n.d. Web. 26 Feb 2013. http://pymc-devs.github.com/pymc/modelbuilding.html.
[5] Cronin, Beau. "Why Probabilistic Programming Matters." 24 Mar 2013. Google, Online Posting to Google . Web. 24 Mar. 2013. https://plus.google.com/u/0/107971134877020469960/posts/KpeRdJKR6Z1.
[6] S.P. Brooks, E.A. Catchpole, and B.J.T. Morgan. Bayesian animal survival estimation. Statistical Science, 15: 357–376, 2000
[7] Gelman, Andrew. "Philosophy and the practice of Bayesian statistics." British Journal of Mathematical and Statistical Psychology. (2012): n. page. Web. 2 Apr. 2013.
[8] Greenhill, Brian, Michael D. Ward, and Audrey Sacks. "The Separation Plot: A New Visual Method for Evaluating the Fit of Binary Models." American Journal of Political Science. 55.No.4 (2011): n. page. Web. 2 Apr. 2013.
End of explanation
"""
|
mne-tools/mne-tools.github.io | stable/_downloads/0a1bad60270bfbdeeea274fcca0015d2/multidict_reweighted_tfmxne.ipynb | bsd-3-clause | # Author: Mathurin Massias <mathurin.massias@gmail.com>
# Yousra Bekhti <yousra.bekhti@gmail.com>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import os.path as op
import mne
from mne.datasets import somato
from mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles
from mne.viz import plot_sparse_source_estimates
print(__doc__)
"""
Explanation: Compute iterative reweighted TF-MxNE with multiscale time-frequency dictionary
The iterative reweighted TF-MxNE solver is a distributed inverse method
based on the TF-MxNE solver, which promotes focal (sparse) sources
:footcite:StrohmeierEtAl2015. The benefits of this approach are that:
it is spatio-temporal without assuming stationarity (source properties
can vary over time),
activations are localized in space, time, and frequency in one step,
the solver uses non-convex penalties in the TF domain, which results in a
solution less biased towards zero than when simple TF-MxNE is used,
using a multiscale dictionary allows to capture short transient
activations along with slower brain waves :footcite:BekhtiEtAl2016.
End of explanation
"""
# Download (if needed) and locate the MNE somatosensory dataset.
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
                    'sub-{}_task-{}_meg.fif'.format(subject, task))
fwd_fname = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
                    'sub-{}_task-{}-fwd.fif'.format(subject, task))

# Read evoked
raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg=True, eog=True, stim=True)
events = mne.find_events(raw, stim_channel='STI 014')
# epoch rejection thresholds for gradiometers and EOG channels
reject = dict(grad=4000e-13, eog=350e-6)
# epoch window: 0.5 s before to 0.5 s after each event
event_id, tmin, tmax = dict(unknown=1), -0.5, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=reject,
                    baseline=(None, 0))
evoked = epochs.average()
# keep only the early post-stimulus window for source localization
evoked.crop(tmin=0.0, tmax=0.2)

# Compute noise covariance matrix
# (tmax=0. restricts the estimate to the pre-stimulus baseline)
cov = mne.compute_covariance(epochs, rank='info', tmax=0.)
del epochs, raw

# Handling forward solution
forward = mne.read_forward_solution(fwd_fname)
"""
Explanation: Load somatosensory MEG data
End of explanation
"""
# Solver hyper-parameters: overall regularization strength (alpha) and the
# trade-off within the time-frequency penalty (l1_ratio); see the
# tf_mixed_norm documentation for details.
alpha, l1_ratio = 20, 0.05
# loose orientation constraint and depth-weighting exponent
loose, depth = 0.9, 1.
# Use a multiscale time-frequency dictionary
# (two STFT window sizes with their corresponding time steps)
wsize, tstep = [4, 16], [2, 4]
# number of reweighting iterations (the "iterative reweighted" part)
n_tfmxne_iter = 10
# Compute TF-MxNE inverse solution with dipole output
dipoles, residual = tf_mixed_norm(
    evoked, forward, cov, alpha=alpha, l1_ratio=l1_ratio,
    n_tfmxne_iter=n_tfmxne_iter, loose=loose,
    depth=depth, tol=1e-3,
    wsize=wsize, tstep=tstep, return_as_dipoles=True,
    return_residual=True)
"""
Explanation: Run iterative reweighted multidict TF-MxNE solver
End of explanation
"""
stc = make_stc_from_dipoles(dipoles, forward['src'])
plot_sparse_source_estimates(
forward['src'], stc, bgcolor=(1, 1, 1), opacity=0.1,
fig_name=f"irTF-MxNE (cond {evoked.comment})")
"""
Explanation: Generate stc from dipoles
End of explanation
"""
ylim = dict(grad=[-300, 300])
evoked.copy().pick_types(meg='grad').plot(
titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim)
residual.copy().pick_types(meg='grad').plot(
titles=dict(grad='Residuals: Gradiometers'), ylim=ylim)
"""
Explanation: Show the evoked response and the residual for gradiometers
End of explanation
"""
|
tensorflow/docs-l10n | site/ko/agents/tutorials/5_replay_buffers_tutorial.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2021 The TF-Agents Authors.
End of explanation
"""
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from tf_agents import specs
from tf_agents.agents.dqn import dqn_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.networks import q_network
from tf_agents.replay_buffers import py_uniform_replay_buffer
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
"""
Explanation: 재현 버퍼
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/agents/tutorials/5_replay_buffers_tutorial"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/agents/tutorials/5_replay_buffers_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행하기</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/agents/tutorials/5_replay_buffers_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서 소스 보기</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/agents/tutorials/5_replay_buffers_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드하기</a></td>
</table>
소개
강화 학습 알고리즘은 재현 버퍼를 사용하여 환경에서 정책을 실행할 때 경험의 궤적을 저장합니다. 훈련 중에, 에이전트의 경험을 "재현"하기 위해 궤적의 서브 세트(순차 서브 세트 또는 샘플)에 대해 재현 버퍼가 조회됩니다.
이 colab에서는 일반적인 API를 공유하는 python-backed 및 tensorflow-backed의 두 가지 유형의 재현 버퍼를 탐색합니다. 다음 섹션에서는 API, 각 버퍼 구현 및 데이터 수집 훈련 중에 API와 버퍼 구현을 사용하는 방법에 관해 설명합니다.
설정
아직 설치하지 않았다면, tf-agents를 설치합니다.
End of explanation
"""
data_spec = (
tf.TensorSpec([3], tf.float32, 'action'),
(
tf.TensorSpec([5], tf.float32, 'lidar'),
tf.TensorSpec([3, 2], tf.float32, 'camera')
)
)
batch_size = 32
max_length = 1000
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec,
batch_size=batch_size,
max_length=max_length)
"""
Explanation: 재현 버퍼 API
재현 버퍼 클래스에는 다음과 같은 정의 및 메서드가 있습니다.
```python
class ReplayBuffer(tf.Module):
"""Abstract base class for TF-Agents replay buffer."""
def init(self, data_spec, capacity):
"""Initializes the replay buffer.
Args:
data_spec: A spec or a list/tuple/nest of specs describing
a single item that can be stored in this buffer
capacity: number of elements that the replay buffer can hold.
"""
@property
def data_spec(self):
"""Returns the spec for items in the replay buffer."""
@property
def capacity(self):
"""Returns the capacity of the replay buffer."""
def add_batch(self, items):
"""Adds a batch of items to the replay buffer."""
def get_next(self,
sample_batch_size=None,
num_steps=None,
time_stacked=True):
"""Returns an item or batch of items from the buffer."""
def as_dataset(self,
sample_batch_size=None,
num_steps=None,
num_parallel_calls=None):
"""Creates and returns a dataset that returns entries from the buffer."""
def gather_all(self):
"""Returns all the items in buffer."""
return self._gather_all()
def clear(self):
"""Resets the contents of replay buffer"""
```
재현 버퍼 객체가 초기화될 때 저장할 요소의 data_spec 이 필요합니다. 이 사양은 버퍼에 추가될 궤적 요소의 TensorSpec에 해당합니다. 이 사양은 일반적으로 훈련 시 에이전트가 기대하는 형상, 유형 및 구조를 정의하는 에이전트의 agent.collect_data_spec를 통해 획득됩니다(나중에 자세히 설명).
TFUniformReplayBuffer
TFUniformReplayBuffer는 TF-Agents에서 가장 일반적으로 사용되는 재현 버퍼이므로 이 튜토리얼에서 사용합니다. TFUniformReplayBuffer에서 버퍼 스토리지의 지원은 tensorflow 변수에 의해 수행되므로 계산 그래프의 일부입니다.
버퍼는 요소의 배치를 저장하며 배치 세그먼트당 최대 용량 max_length 요소를 갖습니다. 따라서 총 버퍼 용량은 batch_size x max_length 요소입니다. 버퍼에 저장된 요소는 모두 일치하는 데이터 사양을 가져야 합니다. 재현 버퍼가 데이터 수집에 사용되는 경우, 사양은 에이전트의 수집 데이터 사양입니다.
버퍼 만들기:
TFUniformReplayBuffer를 만들려면 다음을 전달합니다.
버퍼가 저장할 데이터 요소의 사양
버퍼의 배치 크기에 해당하는 batch size
배치 세그먼트당 max_length 개수의 요소
다음은 샘플 데이터 사양, batch_size 32 및max_length 1000을 가진 TFUniformReplayBuffer를 생성하는 예제입니다.
End of explanation
"""
action = tf.constant(1 * np.ones(
data_spec[0].shape.as_list(), dtype=np.float32))
lidar = tf.constant(
2 * np.ones(data_spec[1][0].shape.as_list(), dtype=np.float32))
camera = tf.constant(
3 * np.ones(data_spec[1][1].shape.as_list(), dtype=np.float32))
values = (action, (lidar, camera))
values_batched = tf.nest.map_structure(lambda t: tf.stack([t] * batch_size),
values)
replay_buffer.add_batch(values_batched)
"""
Explanation: 버퍼에 쓰기:
재현 버퍼에 요소를 추가하기 위해 add_batch(items) 메서드를 사용합니다. 여기서 items는 버퍼에 추가할 항목의 배치를 나타내는 텐서의 목록/튜플/중첩입니다. items의 각 요소는 batch_size와 동일한 외부 차원을 가져야 하고 나머지 차원은 항목의 데이터 사양을 준수해야 합니다(재현 버퍼 생성자에 전달된 데이터 사양과 같음).
다음은 항목의 배치를 추가하는 예제입니다.
End of explanation
"""
# add more items to the buffer before reading
for _ in range(5):
replay_buffer.add_batch(values_batched)
# Get one sample from the replay buffer with batch size 10 and 1 timestep:
sample = replay_buffer.get_next(sample_batch_size=10, num_steps=1)
# Convert the replay buffer to a tf.data.Dataset and iterate through it
dataset = replay_buffer.as_dataset(
sample_batch_size=4,
num_steps=2)
iterator = iter(dataset)
print("Iterator trajectories:")
trajectories = []
for _ in range(3):
t, _ = next(iterator)
trajectories.append(t)
print(tf.nest.map_structure(lambda t: t.shape, trajectories))
# Read all elements in the replay buffer:
trajectories = replay_buffer.gather_all()
print("Trajectories from gather all:")
print(tf.nest.map_structure(lambda t: t.shape, trajectories))
"""
Explanation: 버퍼에서 읽기
TFUniformReplayBuffer에서 데이터를 읽는 방법에는 3가지가 있습니다.
get_next() - 버퍼에서 하나의 샘플을 반환합니다. 반환된 샘플 배치 크기 및 타임스텝 수는 이 메서드에 대한 인수를 통해 지정할 수 있습니다.
as_dataset() - 재현 버퍼를 tf.data.Dataset로 반환합니다. 그런 다음, 데이터세트 반복기를 만들고 버퍼에 있는 항목의 샘플을 반복할 수 있습니다.
gather_all() - 버퍼에 있는 모든 항목을 형상 [batch, time, data_spec]을 가진 Tensor로 반환합니다
다음은 이들 각 메서드를 사용하여 재현 버퍼에서 데이터를 읽는 방법의 예제입니다.
End of explanation
"""
replay_buffer_capacity = 1000*32 # same capacity as the TFUniformReplayBuffer
py_replay_buffer = py_uniform_replay_buffer.PyUniformReplayBuffer(
capacity=replay_buffer_capacity,
data_spec=tensor_spec.to_nest_array_spec(data_spec))
"""
Explanation: PyUniformReplayBuffer
PyUniformReplayBuffer는 TFUniformReplayBuffer와 기능은 같지만, tf 변수 대신 데이터가 numpy 배열에 저장됩니다. 이 버퍼는 그래프를 벗어난 데이터(out-of-graph data) 수집에 사용될 수 있습니다. 백업 스토리지를 numpy에 저장하면 일부 애플리케이션에서 Tensorflow 변수를 사용하지 않고 데이터 조작(예: 우선 순위 업데이트를 위한 인덱싱)을 보다 쉽게 수행할 수 있습니다. 그러나 이 구현에는 Tensorflow를 사용한 그래프 최적화의 이점이 없습니다.
다음은 에이전트의 정책 궤적 사양에서 PyUniformReplayBuffer를 인스턴스화하는 예제입니다.
End of explanation
"""
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
q_net = q_network.QNetwork(
tf_env.time_step_spec().observation,
tf_env.action_spec(),
fc_layer_params=(100,))
agent = dqn_agent.DqnAgent(
tf_env.time_step_spec(),
tf_env.action_spec(),
q_network=q_net,
optimizer=tf.compat.v1.train.AdamOptimizer(0.001))
replay_buffer_capacity = 1000
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
agent.collect_data_spec,
batch_size=tf_env.batch_size,
max_length=replay_buffer_capacity)
# Add an observer that adds to the replay buffer:
replay_observer = [replay_buffer.add_batch]
collect_steps_per_iteration = 10
collect_op = dynamic_step_driver.DynamicStepDriver(
tf_env,
agent.collect_policy,
observers=replay_observer,
num_steps=collect_steps_per_iteration).run()
"""
Explanation: 훈련 중 재현 버퍼 사용하기
이제 재현 버퍼를 작성하고, 재현 버퍼에/에서 항목을 쓰고 읽는 방법을 알았으므로 에이전트를 훈련하는 동안 궤적을 저장하는 데 사용할 수 있습니다.
데이터 수집
먼저 데이터 수집 중에 재현 버퍼를 사용하는 방법을 살펴보겠습니다.
TF-Agents에서는 환경에서 경험을 수집하기 위해 Driver(자세한 내용은 드라이버 튜토리얼 참조)를 사용합니다. Driver를 사용하려면, Driver가 궤적을 받을 때 실행하는 함수인 Observer를 지정합니다.
따라서 재현 버퍼에 궤적 요소를 추가하기 위해 add_batch(items)를 호출하여 재현 버퍼에 배치 항목을 추가하는 observer를 추가합니다.
아래는 TFUniformReplayBuffer를 사용한 예제입니다. 먼저 환경, 네트워크 및 에이전트를 만듭니다. 그런 다음, TFUniformReplayBuffer를 만듭니다. 재현 버퍼에 있는 궤적 요소의 사양은 에이전트의 수집 데이터 사양과 동일합니다. 그런 다음, add_batch 메서드를 훈련 중에 데이터 수집을 수행하는 드라이버의 observer로 설정합니다.
End of explanation
"""
# Read the replay buffer as a Dataset,
# read batches of 4 elements, each with 2 timesteps:
dataset = replay_buffer.as_dataset(
sample_batch_size=4,
num_steps=2)
iterator = iter(dataset)
num_train_steps = 10
for _ in range(num_train_steps):
trajectories, _ = next(iterator)
loss = agent.train(experience=trajectories)
"""
Explanation: train step에 대한 데이터 읽기
재현 버퍼에 궤적 요소를 추가한 후 재현 버퍼에서 궤적의 배치를 읽어 train step의 입력 데이터로 사용할 수 있습니다.
다음은 훈련 루프에서 재현 버퍼로부터 궤적에 대해 훈련하는 방법의 예제입니다.
End of explanation
"""
|
NorfolkDataSci/presentations | python-for-data-science/python-for-data-science.ipynb | mit | greeting = "Hello, "
here = "World!"
print greeting + here
letters = ["a", "b", "c"]
for letter in letters:
print letter + letter
def mySuperCoolFunction(i):
    """Return the square of i."""
    return i ** 2
for j in range(5):
print mySuperCoolFunction(j)
"""
Explanation: Python for data science
Dominion Data Science
Getting started
Install Anaconda
Anaconda is the go to "data science distribution" for python
Comes pre-loaded with numpy, pandas, sklearn and others
Includes additional tools such as jupyter notebooks
Mac OSX
If you have brew, go to terminal and run
brew cask install Caskroom/cask/anaconda
You might also need to add anaconda/bin to your PATH env variable
This is in the .bash_profile file
Open ~/.bash_profile in your favorite text editor
vi ~/.bash_profile
Add the following line
export PATH=~/anaconda3/bin:"$PATH"
Test your install by running python
You should see something like
Python 3.5.2 |Anaconda 4.2.0 (x86_64)| (default, Jul 2 2016, 17:52:12)
If not, confirm that your .bash_profile has been updated and and restart terminal
Windows
Head over to https://www.continuum.io/downloads and use the GUI downloader
Run the executable and follow the instructions. We can also set a PATH variable to make python easier to use
Windows cont.
On XP, right click My Computer -> Properties and navigate to the System Properties -> Advanced tab
<img src="img/system_properties.png" alt="System properties" width="40%" style="float: right"/>
Copy the following into the end of the PATH variable
;%PYTHON_HOME%\;%PYTHON_HOME%\Scripts
Test your install by opening a command prompt and running
C:\Users\Username>python
On Windows 7 and up, search for "environment variables" and follow the same steps
Getting additional packages
Anaconda comes with two ways to manage packages: pip and conda. Using these will..
* Insure your packages install correctly
* Allow you to easily upgrade and uninstall packages
* Manage dependencies when install
Mac
pip install/uninstall <package> [--upgrade]
Or..
conda install/uninstall <package>
Pip is also useful for other things. Example: pip freeze will give you the version of everything you currently have installed.
Windows
pip can also be used on Windows if you properly set up your ENV variable during installation. You can also run it from within git bash or cygwin if you have those installed.
Running python
End of explanation
"""
# my super awesome script
greeting = "Hello, "
location = "World!"
print greeting + location
"""
Explanation: Running scripts
python hello_world.py
End of explanation
"""
import pandas as pd
import numpy as np
from sklearn import datasets, linear_model
import tweepy
"""
Explanation: Importing libraries
End of explanation
"""
import tweepy
"""
Explanation: pip install tweepy
End of explanation
"""
import pandas as pd
aw_excel = pd.read_excel("adventureworks.xls")
aw_csv = pd.read_csv("adventureworks.csv")
import psycopg2 as psql
connection = psql.connect(host="ec2-23-21-219-105.compute-1.amazonaws.com",
user="dcppkvqqofzvbp",
password="12709a63bbb19e91b950008dab2d0301df1d48a6853d43cdf43964fcc863b6db",
dbname="d6bkkvg5ahrfdo")
import psycopg2 as psql
connection = psql.connect(host="host",
user="user",
password="psw",
dbname="database")
queryText = """
SELECT TO_CHAR(th.transactiondate, 'YYYY-MM') AS month
, p.name
, COUNT(*) AS total
FROM production.transactionhistory th
INNER JOIN production.product p
ON th.productid = p.productid
WHERE DATE_PART('year', transactiondate) = '2014'
GROUP BY TO_CHAR(th.transactiondate, 'YYYY-MM'), p.name
;
"""
aw_db = pd.read_sql(queryText, connection)
"""
Explanation: Jupyter qtconsole
jupyter qtconsole
Allows for faster, easier interactive programming
Graphics can be displayed inline
Tab completion
Outputs can be saved to file
Jupyter notebook
jupyter notebook
Allows for interactive, iterative code development
Keeps analysis, code, and visuals all together
Easy to package up project to share and present
Support multiple languages (R, Python, Julia, Go, Scala..)
Adventure Works
End of explanation
"""
# this can be re-used with any query file and any DB-API connection
def query(dbconn, script):
    """Run the SQL statement stored in the file `script` against `dbconn`.

    Parameters
    ----------
    dbconn : DB-API connection (e.g. psycopg2 or sqlite3 connection)
    script : path to a file containing a single SQL query

    Returns
    -------
    pandas.DataFrame with the query result.
    """
    with open(script, 'r') as sql_file:
        # renamed local: the original bound the result to `query`,
        # shadowing this function's own name
        sql_text = sql_file.read()
    return pd.read_sql(sql_text, dbconn)
aw_db = query(connection, "queries/adventureworks.sql")
"""
Explanation: Even better.. let's make a reusable function!
End of explanation
"""
def explore(dataset):
    """Print the column names and summary statistics of a DataFrame.

    Uses the function form of print (valid single-argument syntax in both
    Python 2 and 3; the original used Python-2-only print statements).
    """
    print("Columns: " + " | ".join(dataset.columns))
    print("Summary statistics \n")
    print(dataset.describe())
explore(aw_db)
"""
Explanation: Exploring data
Its much easier now! Data from different sources is now in the same format with the same standard toolset
This makes analysis easier to re-use and standardize
End of explanation
"""
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
fig, axs = plt.subplots(1,2,figsize=(16,5))
aw_db = aw_db.set_index(pd.DatetimeIndex(aw_db['month']))
aw_db.resample('M').sum().plot(ax=axs[0], title="Sales by month");
aw_db["2014-01":"2014-03"].resample('M').sum().plot(ax=axs[1], title="First QTR Sales");
"""
Explanation: Panda panda pandas
Pandas comes with great, out of the box support for data analysis
End of explanation
"""
fig, axs = plt.subplots(1,2,figsize=(16,5))
aw_db.groupby("name").sum().sort_values(by="total", ascending = False)[:20].plot(kind="bar", ax=axs[0]);
bymonth = aw_db.groupby(["month","name"]).sum().reset_index()
bymonth = bymonth.set_index(pd.DatetimeIndex(bymonth.month))
bymonth[bymonth.name == "Water Bottle - 30 oz."].plot(ax=axs[1]);
"""
Explanation: What were our top 20 selling products, and how did the top product sell throughout the year?
End of explanation
"""
from sklearn import linear_model
reg = linear_model.LinearRegression()
aw_db['month'] = pd.to_datetime(aw_db['month'])
reg.fit(aw_db['month'].values.reshape(-1,1), aw_db['total'])
print reg.coef_
"""
Explanation: Beyond analysis
Pandas is well integrated with other python libraries, such as sklearn
This makes building machine learning models quick and easy
End of explanation
"""
|
ml4a/ml4a-guides | examples/reinforcement_learning/deep_q_networks.ipynb | gpl-2.0 | import numpy as np
from blessings import Terminal
class Game():
    """A minimal 'catch' game on a binary grid.

    A 3-cell-wide paddle sits on the bottom row and can move left, stay,
    or right.  A single target cell falls one row per update(); the
    episode ends with reward +1 if the paddle catches it on the last row,
    -1 otherwise.  The state is the flattened grid.
    """
    def __init__(self, shape=(10,10)):
        self.shape = shape
        self.height, self.width = shape
        self.last_row = self.height - 1
        # the paddle extends paddle_padding cells on each side of self.pos
        self.paddle_padding = 1
        self.n_actions = 3 # left, stay, right
        # terminal handle, used only by render()
        self.term = Terminal()
        self.reset()
    def reset(self):
        """Clear the grid, place the paddle and a new falling target."""
        # reset grid
        self.grid = np.zeros(self.shape)
        # can only move left or right (or stay)
        # so position is only its horizontal position (col)
        # NOTE(review): randint's upper bound is exclusive, so the paddle
        # never starts at the rightmost reachable column -- confirm intended
        self.pos = np.random.randint(self.paddle_padding, self.width - 1 - self.paddle_padding)
        self.set_paddle(1)
        # item to catch
        # NOTE(review): target column is drawn from 0..width-2, never the
        # last column -- possibly an off-by-one
        self.target = (0, np.random.randint(self.width - 1))
        self.set_position(self.target, 1)
    def move(self, action):
        """Apply an action (0=left, 1=stay, 2=right), clamped to the grid."""
        # clear previous paddle position
        self.set_paddle(0)
        # action is either -1, 0, 1,
        # but comes in as 0, 1, 2, so subtract 1
        action -= 1
        self.pos = min(max(self.pos + action, self.paddle_padding), self.width - 1 - self.paddle_padding)
        # set new paddle position
        self.set_paddle(1)
    def set_paddle(self, val):
        """Write `val` into every bottom-row cell covered by the paddle."""
        for i in range(1 + self.paddle_padding*2):
            pos = self.pos - self.paddle_padding + i
            self.set_position((self.last_row, pos), val)
    @property
    def state(self):
        """Return a copy of the grid flattened to shape (1, height*width)."""
        return self.grid.reshape((1,-1)).copy()
    def set_position(self, pos, val):
        """Write `val` at grid cell pos == (row, col)."""
        r, c = pos
        self.grid[r,c] = val
    def update(self):
        """Advance the target one row.

        Returns +1 if the target reached the last row on the paddle,
        -1 if it reached the last row elsewhere, and 0 otherwise.
        """
        r, c = self.target
        self.set_position(self.target, 0)
        self.set_paddle(1) # in case the target is on the paddle
        self.target = (r+1, c)
        self.set_position(self.target, 1)
        # off the map, it's gone
        if r + 1 == self.last_row:
            # reward of 1 if collided with paddle, else -1
            if abs(c - self.pos) <= self.paddle_padding:
                return 1
            else:
                return -1
        return 0
    def render(self):
        """Draw the grid with terminal colors (works only in a real terminal)."""
        print(self.term.clear())
        for r, row in enumerate(self.grid):
            for c, on in enumerate(row):
                if on:
                    color = 235
                else:
                    color = 229
                print(self.term.move(r, c) + self.term.on_color(color) + ' ' + self.term.normal)
        # move cursor to end
        print(self.term.move(self.height, 0))
"""
Explanation: Reinforcement Learning: Deep Q-Networks
If you aren't familiar with reinforcement learning, check out the previous guide on reinforcement learning for an introduction.
In the previous guide we implemented the Q function as a lookup table. That worked well enough for that scenario because it had a fairly small state space. However, consider something like DeepMind's Atari player. A state in that task is a unique configuration of pixels. All those Atari games are color, so each pixel has three values (R,G,B), and there are quite a few pixels. So there is a massive state space for all possible configurations of pixels, and we simply can't implement a lookup table encompassing all of these states - it would take up too much memory.
Instead, we can learn a Q function that approximately maps a set of pixel values and an action to some value. We could implement this Q function as a neural network and have it learn how to predict rewards for each action given an input state. This is the general idea behind deep Q-learning (i.e. deep Q networks, or DQNs).
Here we'll put together a simple DQN agent that learns how to play a simple game of catch. The agent controls a paddle at the bottom of the screen that it can move left, right, or not at all (so there are three possible action). An object falls from the top of the screen, and the agent wins if it catches it (a reward of +1). Otherwise, it loses (a reward of -1).
We'll implement the game in black-and-white so that the pixels in the game can be represented as 1 or 0.
Using DQNs are quite like using neural networks in ways you may be more familiar with. Here we'll take a vector that represents the screen, feed it through the network, and the network will output a distribution of values over possible actions. You can kind of think of it as a classification problem: given this input state, label it with the best action to take.
For example, this is the architecture of the Atari player:
The scenario we're dealing with is simple enough that we don't need convolutional neural networks, but we could easily extend it in that way if we wanted (just replace our vanilla neural network with a convolutional one).
Here's what our catch game will look like:
To start I'll present the code for the catch game itself. It's not important that you understand this code - the part we care about is the agent itself.
Note that this needs to be run in the terminal in order to visualize the game.
End of explanation
"""
import os
import random
import warnings
from collections import deque

#if using Theano with GPU
#os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=gpu,floatX=float32"

from keras.models import Sequential
from keras.layers.core import Dense
class Agent():
    """Deep Q-learning agent with experience replay.

    The Q function is a small fully connected network mapping a flattened
    game state to one estimated value per action.  Transitions are stored
    in a bounded memory and replayed in random mini-batches between moves
    to mitigate catastrophic forgetting.
    """
    def __init__(self, env, explore=0.1, discount=0.9, hidden_size=100, memory_limit=5000):
        self.env = env

        # Q network: flattened state in, one value per action out
        model = Sequential()
        model.add(Dense(hidden_size, input_shape=(env.height * env.width,), activation='relu'))
        model.add(Dense(hidden_size, activation='relu'))
        model.add(Dense(env.n_actions))
        model.compile(loss='mse', optimizer='sgd')
        self.Q = model

        # experience replay:
        # remember states to "reflect" on later
        self.memory = deque([], maxlen=memory_limit)
        self.explore = explore    # epsilon for epsilon-greedy action choice
        self.discount = discount  # gamma for discounting future rewards

    def choose_action(self):
        """Return an action index: random with prob. `explore`, else greedy."""
        if np.random.rand() <= self.explore:
            return np.random.randint(0, self.env.n_actions)
        state = self.env.state
        q = self.Q.predict(state)
        return np.argmax(q[0])

    def remember(self, state, action, next_state, reward):
        """Store one (state, action, next_state, reward) transition."""
        # the deque object will automatically keep a fixed length
        self.memory.append((state, action, next_state, reward))

    def _prep_batch(self, batch_size):
        """Sample up to `batch_size` memories and build the (inputs, targets)
        matrices for one supervised training step of the Q network."""
        if batch_size > self.memory.maxlen:
            # bug fix: the original constructed a Warning object without
            # emitting it, so the message was silently dropped
            warnings.warn('batch size should not be larger than max memory size. Setting batch size to memory size')
            batch_size = self.memory.maxlen

        batch_size = min(batch_size, len(self.memory))

        inputs = []
        targets = []

        # prep the batch
        # inputs are states, outputs are values over actions
        batch = random.sample(list(self.memory), batch_size)
        random.shuffle(batch)
        for state, action, next_state, reward in batch:
            inputs.append(state)
            target = self.Q.predict(state)[0]

            # debug, "this should never happen"
            assert not np.array_equal(state, next_state)

            # non-zero reward indicates terminal state
            if reward:
                target[action] = reward
            else:
                # reward + gamma * max_a' Q(s', a')
                Q_sa = np.max(self.Q.predict(next_state)[0])
                target[action] = reward + self.discount * Q_sa
            targets.append(target)

        # to numpy matrices
        return np.vstack(inputs), np.vstack(targets)

    def replay(self, batch_size):
        """Train the Q network on one random mini-batch; return the loss."""
        inputs, targets = self._prep_batch(batch_size)
        loss = self.Q.train_on_batch(inputs, targets)
        return loss

    def save(self, fname):
        """Save the Q network weights to `fname`."""
        self.Q.save_weights(fname)

    def load(self, fname):
        """Load Q network weights from `fname` and print them."""
        self.Q.load_weights(fname)
        print(self.Q.get_weights())
"""
Explanation: Ok, on to the agent itself. I'll present the code in full here, then explain parts in more detail.
End of explanation
"""
import os
import sys
from time import sleep
# One epoch == one complete game; the trailing `record` of recent wins lets
# us watch the win rate improve during training.
game = Game()
agent = Agent(game)
print('training...')
epochs = 6500
batch_size = 256  # replay mini-batch size
fname = 'game_weights.h5'  # where the trained Q-network weights are saved
# keep track of past record_len results
record_len = 100
record = deque([], record_len)
for i in range(epochs):
    game.reset()
    reward = 0
    loss = 0
    # rewards only given at end of game
    while reward == 0:
        # take one epsilon-greedy step in the environment
        prev_state = game.state
        action = agent.choose_action()
        game.move(action)
        reward = game.update()
        new_state = game.state
        # debug, "this should never happen"
        assert not np.array_equal(new_state, prev_state)
        # store the transition, then train on a batch of past experiences
        agent.remember(prev_state, action, new_state, reward)
        loss += agent.replay(batch_size)
    # if running in a terminal, use these instead of print:
    #sys.stdout.flush()
    #sys.stdout.write('epoch: {:04d}/{} | loss: {:.3f} | win rate: {:.3f}\r'.format(i+1, epochs, loss, sum(record)/len(record) if record else 0))
    if i % 100 == 0:
        print('epoch: {:04d}/{} | loss: {:.3f} | win rate: {:.3f}\r'.format(i+1, epochs, loss, sum(record)/len(record) if record else 0))
    # record 1 for a win, 0 for a loss
    record.append(reward if reward == 1 else 0)
agent.save(fname)
"""
Explanation: You'll see that this is quite similar to the previous Q-learning agent we implemented. There are explore and discount values, for example. But the Q function is now a neural network.
The biggest difference are these remember and replay methods.
A challenge with DQNs is that they can be unstable - in particular, they exhibit a problem known as catastrophic forgetting in which later experiences overwrite earlier ones. When this happens, the agent is unable to take full advantage of everything it's learned, only what it's learned most recently.
A method to deal with this is called experience replay. We just store experienced states and their resulting rewards (as "memories"), then between actions we sample a batch of these memories (this is what the _prep_batch method does) and use them to train the neural network (i.e. "replay" these remembered experiences). This will become clearer in the code below, where we actually train the agent.
End of explanation
"""
# play a round with the trained agent
game.reset()
#game.render() # rendering won't work inside a notebook, only from terminal. uncomment
reward = 0
# step until a terminal reward comes back (+1 = caught, -1 = missed)
while reward == 0:
    action = agent.choose_action()
    game.move(action)
    reward = game.update()
    #game.render()
    sleep(0.1)  # slow the loop down so the game is watchable when rendered
print('winner!' if reward == 1 else 'loser!')
"""
Explanation: Here we train the agent for 6500 epochs (that is, 6500 games). We also keep a trailing record of its wins to see if its win rate is improving.
A game goes on until the reward is non-zero, which means the agent has either lost (reward of -1) or won (reward of +1). Note that between each action the agent "remembers" the states and reward it just saw, as well as the action it took. Then it "replays" past experiences to update its neural network.
Once the agent is trained, we can play a round and see if it wins.
Depicted below are the results from my training:
End of explanation
"""
|
AlJohri/DAT-DC-12 | homework/homework2_solutions.ipynb | mit | len(titles)
"""
Explanation: How many movies are listed in the titles dataframe?
End of explanation
"""
titles.sort_values(by='year').head(2)
"""
Explanation: What are the earliest two films listed in the titles dataframe?
End of explanation
"""
len(titles[titles.title == "Hamlet"])
"""
Explanation: How many movies have the title "Hamlet"?
End of explanation
"""
len(titles[titles.title == "North by Northwest"])
"""
Explanation: How many movies are titled "North by Northwest"?
End of explanation
"""
titles[titles.title == "Hamlet"].year.min()
"""
Explanation: When was the first movie titled "Hamlet" made?
End of explanation
"""
titles[titles.title == "Treasure Island"].sort_values(by='year')
"""
Explanation: List all of the "Treasure Island" movies from earliest to most recent.
End of explanation
"""
len(titles[titles.year == 1950])
"""
Explanation: How many movies were made in the year 1950?
End of explanation
"""
len(titles[titles.year == 1960])
"""
Explanation: How many movies were made in the year 1960?
End of explanation
"""
len(titles[(titles.year >= 1950) & (titles.year <= 1959)])
"""
Explanation: How many movies were made from 1950 through 1959?
End of explanation
"""
titles[titles.title == "Batman"]
"""
Explanation: In what years has a movie titled "Batman" been released?
End of explanation
"""
len(cast[cast.title == "Inception"])
"""
Explanation: How many roles were there in the movie "Inception"?
End of explanation
"""
len(cast[(cast.title == "Inception") & (cast.n.isnull())])
"""
Explanation: How many roles in the movie "Inception" are NOT ranked by an "n" value?
End of explanation
"""
len(cast[(cast.title == "Inception") & (cast.n.notnull())])
"""
Explanation: But how many roles in the movie "Inception" did receive an "n" value?
End of explanation
"""
cast[(cast.title == "North by Northwest") & (cast.n.notnull())].sort_values(by='n')
"""
Explanation: Display the cast of "North by Northwest" in their correct "n"-value order, ignoring roles that did not earn a numeric "n" value.
End of explanation
"""
cast[(cast.title == "Sleuth") & (cast.year == 1972) & (cast.n.notnull())].sort_values(by='n')
"""
Explanation: Display the entire cast, in "n"-order, of the 1972 film "Sleuth".
End of explanation
"""
cast[(cast.title == "Sleuth") & (cast.year == 2007) & (cast.n.notnull())].sort_values(by='n')
"""
Explanation: Now display the entire cast, in "n"-order, of the 2007 version of "Sleuth".
End of explanation
"""
len(cast[(cast.title == "Hamlet") & (cast.year == 1921)])
"""
Explanation: How many roles were credited in the silent 1921 version of Hamlet?
End of explanation
"""
len(cast[(cast.title == "Hamlet") & (cast.year == 1996)])
"""
Explanation: How many roles were credited in Branagh’s 1996 Hamlet?
End of explanation
"""
len(cast[cast.character == "Hamlet"])
"""
Explanation: How many "Hamlet" roles have been listed in all film credits through history?
End of explanation
"""
len(cast[cast.character == "Ophelia"])
"""
Explanation: How many people have played an "Ophelia"?
End of explanation
"""
len(cast[cast.character == "The Dude"])
"""
Explanation: How many people have played a role called "The Dude"?
End of explanation
"""
len(cast[cast.character == "The Stranger"])
"""
Explanation: How many people have played a role called "The Stranger"?
End of explanation
"""
len(cast[cast.name == "Sidney Poitier"])
"""
Explanation: How many roles has Sidney Poitier played throughout his career?
End of explanation
"""
len(cast[cast.name == "Judi Dench"])
"""
Explanation: How many roles has Judi Dench played?
End of explanation
"""
# Supporting roles (n == 2) that Cary Grant played in the 1940s, ordered
# by year; integer-dividing the year by 10 selects 1940-1949.
cast[
    (cast.name == 'Cary Grant') &
    (cast.year // 10 == 194) &
    (cast.n == 2)
].sort_values(by='year')
"""
Explanation: List the supporting roles (having n=2) played by Cary Grant in the 1940s, in order by year.
End of explanation
"""
cast[
(cast.name == 'Cary Grant') &
(cast.year // 10 == 194) &
(cast.n == 1)
].sort_values(by='year')
"""
Explanation: List the leading roles that Cary Grant played in the 1940s in order by year.
End of explanation
"""
len(cast[
(cast.year // 10 == 195) &
(cast.type == "actor")
])
"""
Explanation: How many roles were available for actors in the 1950s?
End of explanation
"""
len(cast[
(cast.year // 10 == 195) &
(cast.type == "actress")
])
"""
Explanation: How many roles were available for actresses in the 1950s?
End of explanation
"""
len(cast[
(cast.year <= 1980) &
(cast.n == 1)
])
"""
Explanation: How many leading roles (n=1) were available from the beginning of film history through 1980?
End of explanation
"""
# Non-leading roles through 1980 (n != 1).
# NOTE(review): rows with a missing n compare as NaN != 1 -> True, so
# unranked roles are counted here as well as in the separate isnull()
# count in the next cell — confirm that double-counting is intended.
len(cast[
    (cast.year <= 1980) &
    (cast.n != 1)
])
"""
Explanation: How many non-leading roles were available through from the beginning of film history through 1980?
End of explanation
"""
len(cast[
(cast.year <= 1980) &
(cast.n.isnull())
])
"""
Explanation: How many roles through 1980 were minor enough that they did not warrant a numeric "n" rank?
End of explanation
"""
|
AntArch/Presentations_Github | 20160202_Nottingham_GIServices_Lecture3_Beck_InteroperabilitySemanticsAndOpenData/.ipynb_checkpoints/20151008_OpenGeo_Reuse_under_licence-checkpoint_conflict-20151001-162455.ipynb | cc0-1.0 | from IPython.display import YouTubeVideo
YouTubeVideo('F4rFuIb1Ie4')
## PDF output using pandoc (each step shells out with a fixed command string)
import os
### Export this notebook as markdown
commandLineSyntax = 'ipython nbconvert --to markdown 20151008_OpenGeo_Reuse_under_licence.ipynb'
print (commandLineSyntax)
os.system(commandLineSyntax)
### Export this notebook and the document header as PDF using Pandoc
# (pandoc-citeproc resolves citations; xelatex is needed for unicode)
commandLineSyntax = 'pandoc -f markdown -t latex -N -V geometry:margin=1in DocumentHeader.md 20151008_OpenGeo_Reuse_under_licence.md --filter pandoc-citeproc --latex-engine=xelatex --toc -o interim.pdf '
os.system(commandLineSyntax)
### Remove cruft from the pdf (keep pages 1-3 and 16-end)
commandLineSyntax = 'pdftk interim.pdf cat 1-3 16-end output 20151008_OpenGeo_Reuse_under_licence.pdf'
os.system(commandLineSyntax)
### Remove the interim pdf
commandLineSyntax = 'rm interim.pdf'
os.system(commandLineSyntax)
"""
Explanation: Go down for licence and other metadata about this presentation
\newpage
Preamble
Licence
Unless stated otherwise all content is released under a [CC0]+BY licence. I'd appreciate it if you reference this but it is not necessary.
\newpage
Using Ipython for presentations
A short video showing how to use Ipython for presentations
End of explanation
"""
%install_ext https://raw.githubusercontent.com/rasbt/python_reference/master/ipython_magic/watermark.py
%load_ext watermark
%watermark -a "Anthony Beck" -d -v -m -g
#List of installed conda packages
!conda list
#List of installed pip packages
!pip list
"""
Explanation: The environment
In order to replicate my environment you need to know what I have installed!
Set up watermark
This describes the versions of software used during the creation.
Please note that critical libraries can also be watermarked as follows:
python
%watermark -v -m -p numpy,scipy
End of explanation
"""
!ipython nbconvert 20151008_OpenGeo_Reuse_under_licence.ipynb --to slides --post serve
"""
Explanation: Running dynamic presentations
You need to install the RISE Ipython Library from Damián Avila for dynamic presentations
To convert and run this as a static presentation run the following command:
End of explanation
"""
#Future proof python 2
from __future__ import print_function #For python3 print syntax
from __future__ import division
# def
import IPython.core.display
# A function to collect user input - ipynb_input(varname='username', prompt='What is your username')
def ipynb_input(varname, prompt=''):
    """Prompt user for input and assign string val to given variable name.

    Injects a Javascript ``prompt()`` into the notebook front-end; the
    entered value is pushed back into the kernel as a Python assignment.
    NOTE(review): relies on the classic notebook's ``IPython.notebook``
    object — confirm before reusing under JupyterLab.
    """
    js_code = ("""
        var value = prompt("{prompt}","");
        var py_code = "{varname} = '" + value + "'";
        IPython.notebook.kernel.execute(py_code);
        """).format(prompt=prompt, varname=varname)
    return IPython.core.display.Javascript(js_code)
# inline
%pylab inline
"""
Explanation: To close this instances press control 'c' in the ipython notebook terminal console
Static presentations allow the presenter to see speakers notes (use the 's' key)
If running dynamically run the scripts below
Pre load some useful libraries
End of explanation
"""
from IPython.display import YouTubeVideo
YouTubeVideo('jUzGF401vLc')
"""
Explanation: \newpage
About me
Research Fellow, University of Nottingham: orcid
Director, Geolytics Limited - A spatial data analytics consultancy
About this presentation
Available on GitHub - https://github.com/AntArch/Presentations_Github/
Fully referenced PDF
\newpage
A potted history of mapping
In the beginning was the geoword
and the word was cartography
\newpage
Cartography was king. Static representations of spatial knowledge with the cartographer deciding what to represent.
\newpage
And then there was data .........
\newpage
Restrictive data
\newpage
Making data interoperable and open
\newpage
Technical interoperability - levelling the field
\newpage
Facilitating data driven visualization
From Map to Model The changing paradigm of map creation from cartography to data driven visualization
\newpage
\newpage
\newpage
\newpage
What about non-technical interoperability issues?
Issues surrounding non-technical interoperability include:
Policy interoperabilty
Licence interoperability
Legal interoperability
Social interoperability
We will focus on licence interoperability
\newpage
There is a multitude of formal and informal data.
\newpage
Each of these data objects can be licenced in a different way. This shows some of the licences described by the RDFLicence ontology
\newpage
What is a licence?
Wikipedia state:
A license may be granted by a party ("licensor") to another party ("licensee") as an element of an agreement between those parties.
A shorthand definition of a license is "an authorization (by the licensor) to use the licensed material (by the licensee)."
Concepts (derived from Formal Concept Analysis) surrounding licences
\newpage
Two lead organisations have developed legal frameworks for content licensing:
Creative Commons (CC) and
Open Data Commons (ODC).
Until the release of CC version 4, published in November 2013, the CC licence did not cover data. Between them, CC and ODC licences can cover all forms of digital work.
There are many other licence types
Many are bespoke
Bespoke licences are difficult to manage
Many legacy datasets have bespoke licences
I'll describe CC in more detail
\newpage
Creative Commons Zero
Creative Commons Zero (CC0) is essentially public domain which allows:
Reproduction
Distribution
Derivations
Constraints on CC0
The following clauses constrain CC0:
Permissions
ND – No derivatives: the licensee can not derive new content from the resource.
Requirements
BY – By attribution: the licensee must attribute the source.
SA – Share-alike: if the licensee adapts the resource, it must be released under the same licence.
Prohibitions
NC – Non commercial: the licensee must not use the work commercially without prior approval.
CC license combinations
License|Reproduction|Distribution|Derivation|ND|BY|SA|NC
----|----|----|----|----|----|----|----
CC0|X|X|X||||
CC-BY-ND|X|X||X|X||
CC-BY-NC-ND|X|X||X|X||X
CC-BY|X|X|X||X||
CC-BY-SA|X|X|X||X|X|
CC-BY-NC|X|X|X||X||X
CC-BY-NC-SA|X|X|X||X|X|X
Table: Creative Commons license combinations
\newpage
Why are licenses important?
They tell you what you can and can't do with 'stuff'
Very significant when multiple datasets are combined
It then becomes an issue of license compatibility
\newpage
Which is important when we mash up data
Certain licences when combined:
Are incompatible
Creating data islands
Inhibit commercial exploitation (NC)
Force the adoption of certain licences
If you want people to commercially exploit your stuff don't incorporate CC-BY-NC-SA data!
Stops the derivation of new works
A conceptual licence processing workflow. The licence processing service analyses the incoming licence metadata and determines if the data can be legally integrated and any resulting licence implications for the derived product.
\newpage
A rudimentry logic example
```
Data1 hasDerivedContentIn NewThing.
Data1 hasLicence a cc-by-sa.
What hasLicence a cc-by-sa? #reason here
If X hasDerivedContentIn Y and hasLicence Z then Y hasLicence Z. #reason here
Data2 hasDerivedContentIn NewThing.
Data2 hasLicence a cc-by-nc-sa.
What hasLicence a cc-by-nc-sa? #reason here
Nothing hasLicence a cc-by-nc-sa and hasLicence a cc-by-sa. #reason here
```
And processing this within the Protege reasoning environment
End of explanation
"""
from IPython.display import YouTubeVideo
YouTubeVideo('tkRB5Rp1_W4')
"""
Explanation: \newpage
Here's something I prepared earlier
A live presentation (for those who weren't at the event).....
End of explanation
"""
|
jvcarr/portfolio | projects/Indeed-Scraping-Clean.ipynb | mit | # libraries to import
# related to webscraping - to acquire data
import requests
import bs4
from bs4 import BeautifulSoup
# for working with and visualizing data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# for modeling
from sklearn.cross_validation import cross_val_score, StratifiedKFold , train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# cleaning up the notebook
import warnings
warnings.filterwarnings('ignore')
"""
Explanation: Web Scraping Indeed Data Science Salaries
The goal of the following project was to acquire Indeed.com job listings, discover what I could about data scientist salaries from the job listings that did include salary data, and then predict whether salaries that did not include listings were above or below the median salary.
First, I imported all necessary libraries.
End of explanation
"""
# Indeed search URL: the two {} slots take the url-encoded city and the
# result offset (Indeed pages results 10 at a time).
url_template = "http://www.indeed.com/jobs?q=data+scientist+%2420%2C000&l={}&start={}"
max_results_per_city = 1000

# Parsed result pages; each page holds up to 10 job listings.
results = []
# City searched for each page, kept in lockstep with `results`.
result_city = []

# Request every page of results for every city in the search set.
for city in set(['New+York', 'Chicago', 'San+Francisco', 'Austin', 'Seattle',
                 'Los+Angeles', 'Philadelphia', 'Atlanta', 'Dallas', 'Boston', 'San+Jose',
                 'San+Diego', 'San+Antonio', 'Portland', 'Phoenix', 'Denver', 'Houston', 'Washington+DC']):
    for start in range(0, max_results_per_city, 10):
        page = requests.get(url_template.format(city, start))
        soup = BeautifulSoup(page.text)
        results.append(soup)
        result_city.append(city)
"""
Explanation: Next, I wrote a scraper to obtain job postings for data scientist positions from several different markets.
End of explanation
"""
# Create an empty dataframe with one column per extracted field.
indeed = pd.DataFrame(columns=['title', 'location', 'search_city', 'company', 'salary'])

# Loop through the scraped result pages and extract one row per job card.
# FIX: the original looked each page up with results.index(page), which is
# O(n) per page and maps duplicate pages to the wrong search city;
# enumerate pairs every page with its own index.
for indx, page in enumerate(results):
    for job in page.find_all('div', {'class' : ' row result'}):
        title = job.find('a', {'class':'turnstileLink'}).text
        location = job.find('span', {'class': 'location'}).text
        search_city = result_city[indx]
        # find() returns None when the company span is absent, so .text
        # raises AttributeError — narrower than the original bare except.
        try:
            company = job.find('span', {'class' : 'company'}).text.strip()
        except AttributeError:
            company = 'NA'
        salary = job.find('nobr')
        # add result to end of dataframe
        indeed.loc[len(indeed)] = [title, location, search_city, company, salary]
"""
Explanation: The cell below creates an empty dataframe with columns for all the relevant information that I need, and then loops through the scraped results to add each job to the dataframe.
End of explanation
"""
## Extracting all fields with missing salaries for analysis and estimation later
indeed.salary = indeed.salary.astype(str)
indeed_missing = indeed[indeed['salary'] == 'None']
## Drop duplicate scraped records.
## FIX: the original called drop_duplicates(inplace=True) on a column
## subset, which only mutated a temporary copy and left indeed_missing
## untouched; reassigning with subset= actually removes the duplicates.
indeed_missing = indeed_missing.drop_duplicates(subset=['title', 'location', 'company', 'salary'])
"""
Explanation: Below creates a dataframe for all jobs without salaries, then drops duplicate records from that dataframe.
End of explanation
"""
## Getting only annual salaries, stripping all information out aside from numbers and dash to mark range
indeed_year = indeed[indeed['salary'].str.contains("a year", na=False)]
# The character class removes '$', '/', ',' and the letters making up the
# '<nobr>' markup / 'a year' suffix, leaving digits, '.' and ' - '.
indeed_year.salary = indeed_year.salary.replace('[/$<nobraye>,]', '', regex = True)
# Undo the '+' URL-encoding of the searched city names.
indeed_year.search_city = indeed_year.search_city.replace('+', ' ', regex = True)
"""
Explanation: Below creates a dataframe with only jobs that list annual salaries, then strips out all extra items from the salary string aside from the salary numbers and a '-' indicating if there is a salary range.
The second step removes the '+' from the cities that were searched for.
End of explanation
"""
## Helper to collapse a salary string into a single float.
def sal_split(i):
    """Return the salary as a float.

    Range strings like '50000 - 70000' yield the midpoint of the two
    bounds; a single value like '85000' is converted directly.
    """
    try:
        low, high = i.split(' - ', 1)
        return (float(low) + float(high)) / 2
    except (ValueError, AttributeError):
        # No ' - ' range (or a non-string value): treat as a single number.
        # FIX: the original bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit; catching only the expected errors keeps the
        # fallback without masking unrelated failures.
        return float(i)
## Apply the range-averaging helper to every salary string in the frame.
indeed_year['salary'] = indeed_year['salary'].apply(sal_split)
## Drop duplicate scraped records.
## FIX: the original called drop_duplicates(inplace=True) on a column
## subset, which only mutated a temporary copy and left indeed_year
## untouched; reassigning with subset= actually removes the duplicates.
indeed_year = indeed_year.drop_duplicates(subset=['title', 'location', 'company', 'salary'])
"""
Explanation: The function below gets the mean of the two salaries if there is a range, and converts to a float if there is only one listed. The function is then applied to the dataframe, replacing the original salary.
End of explanation
"""
## Importing dataset of salaries that was previously saved
df = pd.read_csv('indeed.csv', index_col = 0)
## Found 303 records with salaries
## NOTE: this notebook uses Python 2 print statements
print df.shape
## The median salary splits the data into the two classes:
## the lambda labels 1 if salary is above the median, 0 otherwise
med = np.median(df.salary)
print "The median salary is", med
df['high_low'] = df['salary'].map(lambda x: 1 if x > med else 0)
"""
Explanation: Export file to csv - will not run this cell, and will instead use the originally scraped listings.
python
indeed_year.to_csv('indeed.csv', encoding='utf-8')
Begin process for model building below
We need to calculate median salary to determine if listing with a salary is above or below the median. We are making this into a classification problem rather than a regression problem, since salaries can fluctuate and we do not have a very large sample of jobs to work with.
End of explanation
"""
# Mean listed salary per searched city, highest first
df.groupby('search_city').salary.mean().sort_values(ascending = False).plot(kind='bar')
plt.xlabel('City')
plt.ylabel('Mean Salary')
plt.show()
# Number of salary-bearing listings per searched city
df.search_city.value_counts().plot(kind='bar')
plt.xlabel('City')
plt.ylabel('Job Results')
plt.show()
"""
Explanation: The two graphs below show that San Jose and San Francisco have the highest mean salaries, while our dataset has the most job listings with salaries from New York.
End of explanation
"""
## Text analysis of job titles
job_titles = df.title
## Using a TF-IDF vectorizer (not a plain count vectorizer) to score terms;
## ngrams are limited to pairs of 2 words since titles are short
tfv = TfidfVectorizer(lowercase = True, strip_accents = 'unicode', ngram_range=(2,2), stop_words = 'english')
tfv_title = tfv.fit_transform(job_titles).todense()
# create dataframe from the tf-idf vectorized title data
title_counts = pd.DataFrame(tfv_title, columns = tfv.get_feature_names())
random_state = 43
# Features: dummy-coded search city + title-bigram tf-idf scores
X = pd.concat([pd.get_dummies(df['search_city'], drop_first = True).reset_index(), title_counts.reset_index()], axis = 1)
X.drop('index', axis = 1, inplace = True)
y = df['high_low']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = random_state)
logit = LogisticRegression(random_state = random_state)
logit_fit = logit.fit(X_train, y_train)
logit_y_pred = logit.predict(X_test)
# NOTE: Python 2 print statements below
print classification_report(y_test, logit_y_pred, target_names=['Low Salary', 'High Salary'])
print confusion_matrix(y_test, logit_y_pred)
print accuracy_score(y_test, logit_y_pred)
coefs = logit_fit.coef_[0]
names = X.columns
print sorted(zip(map(lambda x: round(x, 4), coefs), names),
reverse=True)
"""
Explanation: The model is created below.
I tested several different classification methods, and several different metrics, but ultimately found that a logistic regression was most accurate, and that using a dummy variable for the search city and a tf-idf vectorizer of the job title with n-grams of 2 words provided the best results. 78.9% of salaries in our test set were correctly predicted using these features with a logistic regression.
End of explanation
"""
# Listings without a posted salary, counted per searched city
indeed_missing.search_city.value_counts().plot(kind='bar')
plt.xlabel('City')
plt.ylabel('Job Results - No Listed Salary')
plt.show()
print("There are {} total job postings with missing salaries".format(len(indeed_missing)))
"""
Explanation: The search locations San Jose and San Francisco were the most indicative of a salary being high paying above the median, while the word pairs 'quantitative analyst' and 'machine learning' in the job title were most indicative of a high paying salary.
Phoenix, San Antonio, Portland, and Los Angeles had similar negative coefficients, indicating that they have about equal weight in determining that a job will have a lower paying salary below the median. The word pairs 'data analyst' and 'research analyst' were the most indicative of a lower paying salary.
Missing Salary Predictions
Below, we will predict if the remaining job postings that did not list salaries will be low or high paying using the above model. The data will need to be transformed so that the search location is a dummy variable, and the job titles are converted using the tf-idf vectorizer fit above.
End of explanation
"""
## TF-IDF transforming the missing-salary job titles
## (reuse the vectorizer fitted on the training data so the feature columns match)
missing_salary_titles = indeed_missing.title
missing_title_counts = pd.DataFrame(tfv.transform(missing_salary_titles).todense(), columns=tfv.get_feature_names())
## Get dummies for job location to allow it to be predicted
miss_city_dum = pd.get_dummies(indeed_missing['search_city'], drop_first = True)
miss_city_dum.reset_index(inplace = True)
## Combine the two feature frames, mirroring the training design matrix
missing_sals = pd.concat([miss_city_dum, missing_title_counts], axis = 1)
missing_sals.drop('index', axis = 1, inplace = True)
logit_title_pred = logit_fit.predict(missing_sals)
indeed_missing['high_low'] = logit_title_pred
print(indeed_missing.head())
## Aggregate by searched city: summed high/low predictions alongside the
## number of listings per city.
by_city = indeed_missing.groupby('search_city')
indeed_missing_agg = pd.concat(
    [by_city.sum(), pd.DataFrame(by_city.title.count())], axis=1)
## Share of postings per city predicted to pay above the median.
indeed_missing_agg['high_pct'] = indeed_missing_agg.high_low / indeed_missing_agg.title
indeed_missing_agg.head()
indeed_missing_agg.high_pct.plot.bar()
plt.xlabel('City')
plt.ylabel('Rate of High Salary Jobs')
plt.title('Rate of High Salary Jobs Predicted by City')
plt.show()
"""
Explanation: Transforming job listings so they can be predicted as high or low paying...
End of explanation
"""
|
opencb/opencga | opencga-client/src/main/python/notebooks/general-notebooks/pyopencga_basic_notebook_003-variants.ipynb | apache-2.0 | from pyopencga.opencga_config import ClientConfiguration
from pyopencga.opencga_client import OpencgaClient
from pprint import pprint
import json
"""
Explanation: pyOpenCGA basic variant and interpretation usage
[NOTE] The server methods used by pyopencga client are defined in the following swagger URL:
- http://bioinfodev.hpc.cam.ac.uk/opencga-test/webservices
[NOTE] Current implemented methods are registered at the following spreadsheet:
- https://docs.google.com/spreadsheets/d/1QpU9yl3UTneqwRqFX_WAqCiCfZBk5eU-4E3K-WVvuoc/edit?usp=sharing
Loading pyOpenCGA
End of explanation
"""
# server host
host = 'http://bioinfo.hpc.cam.ac.uk/opencga-prod'
# user credentials (public demo account, so hard-coding it here is harmless)
user = "demouser"
passwd = "demouser" ## you can skip this, see below.
# the 'demouser' account accesses projects owned by this user
prj_owner = "demo"
"""
Explanation: Setting credentials for LogIn
Credentials
Plese add the credentials for opencga login into a file in json format and read them from there.
End of explanation
"""
# Creating ClientConfiguration dict
config_dict = {"rest": {
"host": host
}
}
print("Config information:\n",config_dict)
"""
Explanation: Creating ConfigClient for server connection configuration
End of explanation
"""
config = ClientConfiguration(config_dict)
oc = OpencgaClient(config)
## Login
oc.login(user, passwd)
"""
Explanation: Login with user credentials
End of explanation
"""
## Let's use corpasome study
study = 'family:corpasome'
variant_client = oc.variants
# Query variants in gene BRCA2 (limit=1 keeps the demo response small)
variants = variant_client.query(study=study, gene='BRCA2', limit=1)
# pprint(variants.get_responses())
"""
Explanation: You are now connected to OpenCGA
Querying Variants
Let's get the variant client to query OpenCGA server
End of explanation
"""
|
Caranarq/01_Dmine | Datasets/INERE/INERE.ipynb | gpl-3.0 | descripciones = {
'P0009' : 'Potencial de aprovechamiento energía solar',
'P0010' : 'Potencial de aprovechamiento energía eólica',
'P0011' : 'Potencial de aprovechamiento energía geotérmica',
'P0012' : 'Potencial de aprovechamiento energía de biomasa',
'P0606' : 'Generación mediante fuentes renovables de energía',
'P0607' : 'Potencial de fuentes renovables de energía',
'P0608' : 'Capacidad instalada para aprovechar fuentes renovables de energía'
}
# Librerias utilizadas
import pandas as pd
import sys
import urllib
import os
import csv
# Configuracion del sistema
print('Python {} on {}'.format(sys.version, sys.platform))
print('Pandas version: {}'.format(pd.__version__))
import platform; print('Running on {} {}'.format(platform.system(), platform.release()))
"""
Explanation: Estandarizacion de datos del Inventario Nacional de Energias Renovables
1. Introduccion
Parámetros que salen de esta fuente
ID |Descripción
---|:----------
P0009|Potencial de aprovechamiento energía solar
P0010|Potencial de aprovechamiento energía eólica
P0011|Potencial de aprovechamiento energía geotérmica
P0012|Potencial de aprovechamiento energía de biomasa
P0606|Generación mediante fuentes renovables de energía
P0607|Potencial de fuentes renovables de energía
P0608|Capacidad instalada para aprovechar fuentes renovables de energía
End of explanation
"""
# Read the current renewable-energy dataset exactly as downloaded
directorio = r'D:\PCCS\00_RawData\01_CSV\INERE\\'
archivo = directorio+'Actual Energia Renovable.xls'
raw_actual = pd.read_excel(archivo).dropna()
raw_actual.head()
# The dataset raises an error when read directly (see note in the text)
"""
Explanation: Descarga de datos
Los datos se encuentran en la plataforma del Inventario Nacional de Energias Renovables (INERE) ubicada en https://dgel.energia.gob.mx/inere/, y tienen que descargarse manualmente porque su página está elaborada en Flash y no permite la descarga sistematizada de datos. A veces ni funciona por sí misma, manda errores al azar.
Se descargaron dos datasets, uno que contiene el Inventario Actual y otro con el Inventario Potencial de energías renovables a nivel nacional.
Como la base de datos no incluye claves geoestadísticas, estas tienen que asignarse manualmente. A continuacion se muestra el encabezado del archivo que se procesó a mano.
End of explanation
"""
# Lectura del dataset de energia renovable actual después de ser re-guardado en excel
directorio = r'D:\PCCS\00_RawData\01_CSV\INERE\\'
archivo = directorio+'Actual Energia Renovable.xlsx'
raw_actual = pd.read_excel(archivo).dropna()
raw_actual.head()
"""
Explanation: Ninguno de los dos datasets puede ser leido por python tal como fue descargado, por lo que tienen que abrirse en excel y guardarse nuevamente en formato xlsx.
Dataset energia renovable actual
End of explanation
"""
# Read the manually processed current renewable-energy dataset.
# CVE_MUN geo-keys were assigned by hand; keep them as strings so that
# leading zeros in the municipal codes are preserved.
directorio = r'D:\PCCS\00_RawData\01_CSV\INERE\\'
archivo = directorio+'Actual CVE_GEO.xlsx'
actual_proc = pd.read_excel(archivo, dtype={'CVE_MUN': 'str'}).dropna()
actual_proc.head()
"""
Explanation: Se asignó CVE_MUN manualmente a la mayoría de los registros. No fue posible encontrar una clave geoestadística para las siguientes combinaciones de estado/municipio
ESTADO |MUNICIPIO
-------|:----------
Veracruz|Jiotepec
Chiapas|Atizapan
Oaxaca|Motzorongo
Guerrero|La Venta
Jalisco|Santa Rosa
Para los siguientes registros, la CVE_MUN fue intuida desde el nombre de la población o el nombre del proyecto:
ESTADO |MUNICIPIO|CVE_MUN|PROYECTO
-------|:----------|-------|------
Puebla|Atencingo|21051|Ingenio de Atencingo
Puebla|Tatlahuquitepec|21186|Mazatepec
A continuación se presenta el encabezado del dataset procesado manualmente, incluyendo columnas que se utilizaron como auxiliares para la identificación de municipios
End of explanation
"""
list(actual_proc)
# Drop helper columns that were only needed for the manual geocoding step.
for columna in ['ESTADO', 'MUNICIPIO', '3EDO3', '3MUN3',
                'GEO_EDO', 'GEOEDO_3MUN', 'GEO_MUN_Nom']:
    del actual_proc[columna]
# Give the remaining columns unique, descriptive names.
actual_proc = actual_proc.rename(columns={'NOMBRE': 'NOMBRE PROYECTO',
                                          'PRODUCTOR': 'SECTOR PRODUCCION',
                                          'TIPO': 'TIPO FUENTE ENER',
                                          'UNIDADES': 'UNIDADES GEN'})
# Index the frame by the municipal geostatistical key.
actual_proc.set_index('CVE_MUN', inplace=True)
actual_proc.head()
# Metadatos estándar
metadatos = {
'Nombre del Dataset': 'Inventario Actual de Energias Renovables',
'Descripcion del dataset': 'Plantas de generación de energía a partir de fuentes renovables en la República Mexicana',
'Disponibilidad Temporal': '2014',
'Periodo de actualizacion': 'No Determinada',
'Nivel de Desagregacion': 'Localidad, Municipal, Estatal, Nacional',
'Notas': None,
'Fuente': 'SENER',
'URL_Fuente': 'https://dgel.energia.gob.mx/inere/',
'Dataset base': None,
}
# Convertir metadatos a dataframe
actualmeta = pd.DataFrame.from_dict(metadatos, orient='index', dtype=None)
actualmeta.columns = ['Descripcion']
actualmeta = actualmeta.rename_axis('Metadato')
actualmeta
list(actual_proc)
# Descripciones de columnas
variables = {
'NOMBRE PROYECTO': 'Nombre del proyecto de produccion de energia',
'SECTOR PRODUCCION': 'Sector al que pertenece el proyecto de produccion de energia',
'TIPO FUENTE ENER': 'Tipo de fuente de donde se obtiene la energía',
'UNIDADES GEN': 'Numero de generadores instalados por proyecto',
'CAPACIDAD INSTALADA (MW)': 'Capacidad Instalada en Megawatts',
'GENERACIÓN (GWh/a) ' : 'Generación de Gigawatts/hora al año'
}
# Convertir descripciones a dataframe
actualvars = pd.DataFrame.from_dict(variables, orient='index', dtype=None)
actualvars.columns = ['Descripcion']
actualvars = actualvars.rename_axis('Mnemonico')
actualvars
# Persist the cleaned dataset so the indicator can be built from it later.
# (local renamed from `file`, which shadowed the Python 2 builtin)
ruta_salida = r'D:\PCCS\01_Dmine\Datasets\INERE\ER_Actual.xlsx'
writer = pd.ExcelWriter(ruta_salida)
actual_proc.to_excel(writer, sheet_name='DATOS')
actualmeta.to_excel(writer, sheet_name='METADATOS')
actualvars.to_excel(writer, sheet_name='VARIABLES')
writer.save()
print('---------------TERMINADO---------------')
"""
Explanation: Para guardar el dataset y utilizarlo en la construcción del parámetro, se eliminarán algunas columnas.
End of explanation
"""
# Read the renewable-energy potential dataset after re-saving it in Excel.
# Keep CVE_MUN as string so leading zeros in the geo-keys are preserved.
directorio = r'D:\PCCS\00_RawData\01_CSV\INERE\\'
archivo = directorio+'Potencial Energia Renovable.xlsx'
raw_potencial = pd.read_excel(archivo, dtype={'CVE_MUN': 'str'}).dropna()
raw_potencial.head()
# Drop helper columns that were only needed for the manual geocoding step.
# NOTE: potencial_proc is a reference to raw_potencial (not a copy), so
# the deletions below also mutate raw_potencial — same as the original.
potencial_proc = raw_potencial
for columna in ['ESTADO', 'MUNICIPIO', '3EDO3', '3MUN3',
                'GEO_EDO', 'GEOEDO_3MUN', 'GEO_MUN_Nom']:
    del potencial_proc[columna]
potencial_proc.head()
potencial_proc['SUBCLASIFICACIÓN'].unique()
# Nombre Unico de Coloumnas
potencial_proc = potencial_proc.rename(columns = {
'PROYECTO' : 'NOMBRE PROYECTO',
'CLASIFICACIÓN': 'PROBABILIDAD',
'TIPO': 'TIPO FUENTE ENER',
'SUBCLASIFICACIÓN': 'NOTAS'})
# Asignacion de CVE_MUN como indice
potencial_proc.set_index('CVE_MUN', inplace=True)
potencial_proc.head()
# Metadatos estándar
metadatos = {
'Nombre del Dataset': 'Inventario Potencial de Energias Renovables',
'Descripcion del dataset': 'listado de Proyectos con potencial para generar energía a partir de fuentes renovables',
'Disponibilidad Temporal': '2014',
'Periodo de actualizacion': 'No Determinada',
'Nivel de Desagregacion': 'Localidad, Municipal, Estatal, Nacional',
'Notas': None,
'Fuente': 'SENER',
'URL_Fuente': 'https://dgel.energia.gob.mx/inere/',
'Dataset base': None,
}
# Convertir metadatos a dataframe
potenmeta = pd.DataFrame.from_dict(metadatos, orient='index', dtype=None)
potenmeta.columns = ['Descripcion']
potenmeta = potenmeta.rename_axis('Metadato')
potenmeta
list(potencial_proc)
potencial_proc['FUENTE'].unique()
# Descripciones de columnas
variables = {
'NOMBRE PROYECTO': 'Nombre del proyecto de produccion de energia',
'TIPO FUENTE ENER': 'Tipo de fuente de donde se obtiene la energía',
'PROBABILIDAD': 'Certeza respecto al proyecto deproduccion de energía',
'NOTAS': 'Notas',
'CAPACIDAD INSTALABLE (MW)': 'Capacidad Instalable en Megawatts',
'POTENCIAL (GWh/a) ' : 'Potencial de Generación de Gigawatts/hora al año',
'FUENTE': 'Fuente de información'
}
# Convertir descripciones a dataframe
potencialvars = pd.DataFrame.from_dict(variables, orient='index', dtype=None)
potencialvars.columns = ['Descripcion']
potencialvars = potencialvars.rename_axis('Mnemonico')
potencialvars
# Persist the cleaned dataset so the indicator can be built from it later.
# (local renamed from `file`, which shadowed the Python 2 builtin)
ruta_salida = r'D:\PCCS\01_Dmine\Datasets\INERE\ER_Potencial.xlsx'
writer = pd.ExcelWriter(ruta_salida)
potencial_proc.to_excel(writer, sheet_name='DATOS')
potenmeta.to_excel(writer, sheet_name='METADATOS')
potencialvars.to_excel(writer, sheet_name='VARIABLES')
writer.save()
print('---------------TERMINADO---------------')
"""
Explanation: Dataset Potencial de Energia Renovable
End of explanation
"""
|
darkomen/TFG | medidas/20072015/FILAEXTRUDER/Analisis.ipynb | cc0-1.0 | %pylab inline
#Importamos las librerías utilizadas
import numpy as np
import pandas as pd
import seaborn as sns
#Mostramos las versiones usadas de cada librerías
print ("Numpy v{}".format(np.__version__))
print ("Pandas v{}".format(pd.__version__))
print ("Seaborn v{}".format(sns.__version__))
# Load the sample data logged by the filament extruder from CSV.
datos = pd.read_csv('datos.csv')
# Columns of interest for this analysis: filament diameter (X axis) and
# puller (tractora) speed.
columns = ['Diametro X','VELOCIDAD']
# Summary statistics (count/mean/std/quartiles) of the selected columns.
datos[columns].describe()
#datos.describe().loc['mean',['Diametro X [mm]', 'Diametro Y [mm]']]
"""
Explanation: Análisis de los datos obtenidos
Producción del día 20 de Julio de 2015
Los datos del experimento:
* Hora de inicio: 09:56
* Hora final : 10:1
* $T: 135ºC$
* $V_{min} tractora: 1 mm/s$
* $V_{max} tractora: 3 mm/s$
Se desea comprobar si el filamento que podemos llegar a extruir con el sistema de la tractora puede llegar a ser bueno como para regularlo.
End of explanation
"""
# Plot diameter and puller speed over time, with the speed on a secondary
# y axis; the red horizontal lines mark the 1.6-1.8 mm quality band.
datos[columns].plot(secondary_y=['VELOCIDAD'],ylim=(1,2.5),figsize=(10,5),title='Relación entre la velocidad de tracción y el diámetro').hlines([1.6 ,1.8],0,2000,colors='r')
# Box plot of both diameter columns. `.loc` replaces the deprecated `.ix`
# indexer (removed in pandas >= 1.0); the label slice is end-inclusive.
datos.loc[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
"""
Explanation: Representamos ambos diámetro y la velocidad de la tractora en la misma gráfica
End of explanation
"""
plt.scatter(x=datos['Diametro X'], y=datos['Diametro Y'], marker='.')
"""
Explanation: Con esta segunda aproximación se ha conseguido estabilizar los datos. Se va a tratar de bajar ese porcentaje. Como cuarta aproximación, vamos a modificar las velocidades de tracción. El rango de velocidades propuesto es de 1.5 a 5.3, manteniendo los incrementos del sistema experto como en el actual ensayo.
Comparativa de Diametro X frente a Diametro Y para ver el ratio del filamento
End of explanation
"""
# Keep only samples where both measured diameters are >= 0.9 mm; readings
# below that are assumed to be sensor errors and are discarded.
datos_filtrados = datos[(datos['Diametro X'] >= 0.9) & (datos['Diametro Y'] >= 0.9)]
#datos_filtrados.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
"""
Explanation: Filtrado de datos
Las muestras tomadas con $d_x < 0.9$ o $d_y < 0.9$ las asumimos como error del sensor; por ello las eliminamos y conservamos únicamente las muestras con ambos diámetros $\geq 0.9$.
End of explanation
"""
plt.scatter(x=datos_filtrados['Diametro X'], y=datos_filtrados['Diametro Y'], marker='.')
"""
Explanation: Representación de X/Y
End of explanation
"""
# X/Y diameter ratio per sample: 1.0 means a perfectly round filament.
ratio = datos_filtrados['Diametro X']/datos_filtrados['Diametro Y']
ratio.describe()
# Rolling mean/std over a 50-sample window to smooth the ratio signal.
# NOTE(review): pd.rolling_mean / pd.rolling_std belong to the pre-0.18
# pandas API and were removed later; the modern equivalent is
# ratio.rolling(50).mean() / .std().
rolling_mean = pd.rolling_mean(ratio, 50)
rolling_std = pd.rolling_std(ratio, 50)
rolling_mean.plot(figsize=(12,6))
# plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5)
ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5))
"""
Explanation: Analizamos datos del ratio
End of explanation
"""
# Upper/lower quality thresholds for the filament diameter (mm).
Th_u = 1.85
Th_d = 1.65
# Samples where either diameter falls outside the [Th_d, Th_u] band.
data_violations = datos[(datos['Diametro X'] > Th_u) | (datos['Diametro X'] < Th_d) |
                    (datos['Diametro Y'] > Th_u) | (datos['Diametro Y'] < Th_d)]
data_violations.describe()
data_violations.plot(subplots=True, figsize=(12,12))
"""
Explanation: Límites de calidad
Calculamos el número de veces que traspasamos unos límites de calidad.
$Th^+ = 1.85$ and $Th^- = 1.65$
End of explanation
"""
|
ldhagen/docker-jupyter | OpenCV_Recognize.ipynb | mit | ! wget http://docs.opencv.org/master/res_mario.jpg
import cv2
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image as PIL_Image
from IPython.display import Image as IpyImage
IpyImage(filename='res_mario.jpg')
"""
Explanation: OpenCV template recognition from http://docs.opencv.org/master/d4/dc6/tutorial_py_template_matching.html#gsc.tab=0
End of explanation
"""
# Build an initial test case by cropping the left half of the full image.
img_full = PIL_Image.open('res_mario.jpg')
# Use integer floor division: PIL crop boxes are pixel coordinates, and
# Python 3's `/` would yield a float for odd widths.
img_half = img_full.crop((0, 0, img_full.size[0] // 2, img_full.size[1]))
img_half.save('mario_test1.jpg')
IpyImage(filename='mario_test1.jpg')
"""
Explanation: Crop the image to make an initial test case
End of explanation
"""
source = PIL_Image.open('mario_test1.jpg')
coin = source.crop((100,113,110,129))
coin.save('coin.jpg')
IpyImage(filename = 'coin.jpg')
"""
Explanation: next grab a gold coin as template
End of explanation
"""
# Load the scene in color (for drawing) and grayscale (for matching);
# cv2.imread returns BGR, so BGR2GRAY is the correct conversion here.
img_rgb = cv2.imread('mario_test1.jpg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
# Load the coin template as grayscale (flag 0) and record its width/height.
template = cv2.imread('coin.jpg',0)
w, h = template.shape[::-1]
# Normalised cross-correlation map: one match score per template position.
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
threshold = 0.8
# Positions whose score clears the threshold; np.where returns (rows, cols),
# reversed below to (x, y) as expected by cv2.rectangle.
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
    # draw a red (BGR (0,0,255)) box around each detected coin
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
cv2.imwrite('res.jpg',img_rgb)
IpyImage(filename = 'res.jpg')
"""
Explanation: next process template
End of explanation
"""
|
BeyondTheClouds/enoslib | docs/tutorials/iotlab/tuto_iotlab_g5k_ipv6.ipynb | gpl-3.0 | from enoslib import *
import logging
import sys
"""
Explanation: Grid'5000 and FIT/IoT-LAB - IPv6
Introduction
This example shows how to interact with both platforms in a single experiment.
An IPv6 network is built in IoT-LAB platform, composed of a border sensor and CoAP servers.
A node in Grid'5000 is the client, which uses a CoAP client to read the sensor using its global IPv6 address.
Inspired by: https://www.iot-lab.info/legacy/tutorials/contiki-coap-m3/index.html
End of explanation
"""
# Root logger: everything at DEBUG and above goes to "debug.log",
# while the console (stdout) only shows INFO and above.
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# File handler (append mode) with a detailed, timestamped format.
file_handler = logging.FileHandler("debug.log", 'a')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(
    logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log.addHandler(file_handler)
# Console handler with a compact, right-aligned level label.
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging.Formatter("[%(levelname)8s] : %(message)s"))
log.addHandler(console_handler)
"""
Explanation: Configuring logging: save DEBUG to a file and INFO to stdout
End of explanation
"""
# Experiment name shared by every provider so the reservations correlate.
job_name="iotlab_g5k-ipv6"
# FIT/IoT-LAB reservation (saclay site, 1 hour):
#  - 1 M3 node flashed with the border-router firmware image
#  - 2 M3 nodes flashed with the CoAP example-server firmware image
iotlab_dict = {
    "walltime": "01:00",
    "job_name": job_name,
    "resources": {
        "machines": [
            {
                "roles": ["border_router"],
                "archi": "m3:at86rf231",
                "site": "saclay",
                "number": 1,
                "image": "border-router.iotlab-m3",
            },
            {
                "roles": ["sensor"],
                "archi": "m3:at86rf231",
                "site": "saclay",
                "number": 2,
                "image": "er-example-server.iotlab-m3",
            },
        ]
    },
}
# Validate the dict and build the provider configuration object.
iotlab_conf = IotlabConf.from_dictionary(iotlab_dict)
"""
Explanation: Getting resources
IoT-LAB provider configuration: reserve M3 nodes in saclay site
Note: It uses the following M3 images: border-router.iotlab-m3 and er-example-server.iotlab-m3.
More details on how to generate these images in: https://www.iot-lab.info/legacy/tutorials/contiki-coap-m3/index.html
End of explanation
"""
# Grid'5000 reservation: one node of the "yeti" cluster with classic SSH
# access, attached to the production network declared below.
g5k_dict = {
    "job_type": "allow_classic_ssh",
    "job_name": job_name,
    "resources": {
        "machines": [
            {
                "roles": ["client"],
                "cluster": "yeti",
                "nodes": 1,
                "primary_network": "default",
                "secondary_networks": [],
            },
        ],
        "networks": [
            {"id": "default", "type": "prod", "roles": ["my_network"], "site": "grenoble"}
        ],
    },
}
# NOTE(review): "from_dictionnary" (sic) is the actual method name exposed by
# this enoslib version for G5kConf -- do not "fix" the spelling here.
g5k_conf = G5kConf.from_dictionnary(g5k_dict)
"""
Explanation: Grid'5000 provider configuration: reserve nodes in grenoble
End of explanation
"""
# Read the IoT-LAB credentials stored locally (created by `iotlab-auth`).
import iotlabcli.auth
iotlab_user, _ = iotlabcli.auth.get_user_credentials()
# Static provider describing the IoT-LAB saclay frontend host: later
# commands (tunslip, cleanup) run there over SSH as the IoT-LAB user.
iotlab_frontend_conf = (
    StaticConf()
    .add_machine(
        roles=["frontend"],
        address="saclay.iot-lab.info",
        alias="saclay",
        user=iotlab_user
    )
    .finalize()
)
"""
Explanation: We still need a Static provider to interact with the IoT-LAB frontend machine
End of explanation
"""
iotlab_provider = Iotlab(iotlab_conf)
iotlab_roles, _ = iotlab_provider.init()
print(iotlab_roles)
"""
Explanation: IoT-LAB: getting resources
End of explanation
"""
g5k_provider = G5k(g5k_conf)
g5k_roles, g5knetworks = g5k_provider.init()
print(g5k_roles)
"""
Explanation: Grid'5000: getting resources
End of explanation
"""
frontend_provider = Static(iotlab_frontend_conf)
frontend_roles, _ = frontend_provider.init()
print(frontend_roles)
"""
Explanation: Static: getting resources
End of explanation
"""
# Request an IPv6 address on the production bridge interface (br0) of the
# Grid'5000 node via DHCPv6 (see https://www.grid5000.fr/w/IPv6).
result=run_command("dhclient -6 br0", roles=g5k_roles)
# Show the interface addresses to confirm the IPv6 lease was obtained.
result = run_command("ip address show dev br0", roles=g5k_roles)
print(result['ok'])
"""
Explanation: Configuring network connectivity
Enabling IPv6 on Grid'5000 nodes (https://www.grid5000.fr/w/IPv6)
End of explanation
"""
# IPv6 prefix routed to the IoT-LAB saclay site.
iotlab_ipv6_net="2001:660:3207:4c0::"
# Start tunslip6 on the frontend: it bridges the border router's serial line
# (TCP port 20000) into the IPv6 network, assigning <prefix>1/64 to the BR
# side. Output is redirected to a file so it can be parsed later; asynch/poll
# keep the command running in the background (up to one hour).
tun_cmd = "sudo tunslip6.py -v2 -L -a %s -p 20000 %s1/64 > tunslip.output 2>&1" % (iotlab_roles["border_router"][0].alias, iotlab_ipv6_net)
result=run_command(tun_cmd, roles=frontend_roles, asynch=3600, poll=0)
"""
Explanation: Starting tunslip command in frontend.
Redirect tunslip command output to a file to read it later.
End of explanation
"""
iotlab_roles["border_router"][0].reset()
"""
Explanation: Reseting border router
End of explanation
"""
# Read back the tunslip output captured on the frontend.
result = run_command("cat tunslip.output", roles=frontend_roles)
print(result['ok'])
import re
out = result['ok']['saclay']['stdout']
print(out)
# After reset, the border router prints its addresses under the line
# "Server IPv6 addresses:"; capture the first address that starts with our
# IoT-LAB prefix (prefix + 4 hex chars of the node UID).
match = re.search(rf'Server IPv6 addresses:\n.+({iotlab_ipv6_net}\w{{4}})', out, re.MULTILINE|re.DOTALL)
br_ipv6 = match.groups()[0]
print("Border Router IPv6 address from tunslip output: %s" % br_ipv6)
"""
Explanation: Get the Border Router IPv6 address from tunslip output
End of explanation
"""
result = run_command("ping6 -c3 %s" % br_ipv6, pattern_hosts="client*", roles=g5k_roles)
print(result['ok'])
"""
Explanation: Checking ping from Grid'5000 to border router node
End of explanation
"""
with play_on(roles=g5k_roles) as p:
p.apt(name=["python3-aiocoap", "lynx"], state="present")
"""
Explanation: Installing and using CoAP clients
Install aiocoap client and lynx on grid'5000 nodes
End of explanation
"""
result = run_command("lynx -dump http://[%s]" % br_ipv6, roles=g5k_roles)
print(result['ok'])
"""
Explanation: Grab the CoAP server node’s IPv6 address from the BR’s web interface
End of explanation
"""
# The BR web page lists the neighbours' link-local addresses (fe80::xxxx);
# the last 16 bits are the node UID, which we append to the global prefix
# to build the CoAP server's routable IPv6 address.
out = result['ok'][g5k_roles["client"][0].address]['stdout']
print(out)
match = re.search(r'fe80::(\w{4})', out, re.MULTILINE|re.DOTALL)
node_uid = match.groups()[0]
print(node_uid)
# GET the light-sensor resource of the CoAP server (default CoAP port 5683).
result = run_command("aiocoap-client coap://[%s%s]:5683/sensors/light" % (iotlab_ipv6_net, node_uid), roles=g5k_roles)
print(result['ok'])
"""
Explanation: For a CoAP server, GET light sensor
End of explanation
"""
result = run_command("aiocoap-client coap://[%s%s]:5683/sensors/pressure" % (iotlab_ipv6_net, node_uid), roles=g5k_roles)
print(result['ok'])
"""
Explanation: GET pressure for the same sensor
End of explanation
"""
result = run_command("pgrep tunslip6 | xargs kill", roles=frontend_roles)
"""
Explanation: Clean-up phase
Stop tunslip in frontend node
End of explanation
"""
g5k_provider.destroy()
iotlab_provider.destroy()
"""
Explanation: Destroy jobs in testbeds
End of explanation
"""
|
nitheeshkl/Udacity_CarND_LaneLines_P1 | P1.ipynb | mit | #importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
"""
Explanation: Self-Driving Car Engineer Nanodegree
Project: Finding Lane Lines on the Road
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a write up template that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the rubric points for this project.
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".
The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Transform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
Run the cell below to import some packages. If you get an import error for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.
Import Packages
End of explanation
"""
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
"""
Explanation: Read in an Image
End of explanation
"""
import math
from scipy import stats
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    NOTE: to display the result as grayscale with matplotlib, call
    plt.imshow(gray, cmap='gray').
    """
    # Images in this notebook are loaded with matplotlib.image.imread and
    # moviepy, both of which return RGB arrays, so RGB2GRAY is the correct
    # conversion here (BGR2GRAY would swap the red/blue channel weights).
    # Use cv2.COLOR_BGR2GRAY instead if the image was read with cv2.imread().
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
    """Apply the Canny edge detector with the given hysteresis thresholds."""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Smooth the image with a square Gaussian kernel of side kernel_size."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """Mask `img` so that only the polygon described by `vertices` is kept.

    Pixels inside the polygon are returned unchanged; everything outside
    is set to black.
    """
    # The fill value must match the channel count of the input image.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon white on an all-black mask of the same shape.
    polygon_mask = np.zeros_like(img)
    cv2.fillPoly(polygon_mask, vertices, fill_color)
    # Keep only the pixels where the mask is non-zero.
    return cv2.bitwise_and(img, polygon_mask)
# Slopes/intercepts of the lane lines fitted in the previous video frame.
# draw_lines() reuses them when the current frame yields no usable Hough
# segments, keeping the overlay stable across detection dropouts.
previous_lslope = 1
previous_lintercept = 0
previous_rslope = 1
previous_rintercept = 0
def draw_lines(img, lines, color=[255, 0, 0], thickness=8):
    """Average/extrapolate the Hough segments and draw one solid line per lane.

    Segments are classified as left/right lane candidates by their slope
    (the image origin (0,0) is the top-left corner, so a right-lane segment
    has a positive slope), fitted with linear regression, and the two fitted
    lines are extrapolated over the lower 40% of the frame.

    Lines are drawn on `img` in place (mutates the image); combine with
    weighted_img() for a semi-transparent overlay.

    NOTE(review): per-frame fallback state lives in module-level globals
    (previous_[lr]slope / previous_[lr]intercept), so this function is not
    safe to run on two videos concurrently.
    """
    # endpoint coordinates of the segments assigned to each lane
    left_x = []
    left_y = []
    right_x = []
    right_y = []
    for line in lines:
        for x1,y1,x2,y2 in line:
            # slope of this segment
            # NOTE(review): a vertical segment (x2 == x1) divides by zero;
            # with the numpy ints from HoughLinesP this presumably yields a
            # warning/inf (then rejected by the bands below) -- confirm.
            slope = (y2-y1)/(x2-x1)
            # positive slope => right lane; the 0.5..0.65 band rejects
            # near-horizontal and otherwise implausible segments
            if (slope > 0.5) and (slope < 0.65):
                # store the points
                right_x.append(x1)
                right_x.append(x2)
                right_y.append(y1)
                right_y.append(y2)
                # draw the raw hough segments too, to visually compare the error
               # cv2.line(img, (x1, y1), (x2, y2), [0,255,0], 2)
            # negative slope => left lane
            elif (slope < -0.5) and (slope > -0.7):
                # store the points
                left_x.append(x1)
                left_x.append(x2)
                left_y.append(y1)
                left_y.append(y2)
                # draw the raw hough segments too, to visually compare the error
               # cv2.line(img, (x1, y1), (x2, y2), [0,255,0], 2)
    global previous_lslope
    global previous_lintercept
    global previous_rslope
    global previous_rintercept
    # fit one straight line per lane with linear regression
    if left_x and left_y:
        previous_lslope, previous_lintercept, lr_value, lp_value, lstd_err = stats.linregress(left_x,left_y)
    if right_x and right_y:
        previous_rslope, previous_rintercept, rr_value, rp_value, rstd_err = stats.linregress(right_x,right_y)
    # otherwise the slope & intercept from the previous frame are kept,
    # assuming the next frames will again yield a correct lane fit
    # FIXME: this fails when lane lines are missed in several consecutive
    # frames -- better to not draw a (stale) lane line at all?
    # extrapolate the fitted lines over the lower part of the image
    x = img.shape[1]
    y = img.shape[0]
    # left line: from the bottom edge up to 60% of the image height
    l_y1 = int(round(y))
    l_y2 = int(round(y*0.6))
    l_x1_lr = int(round((l_y1-previous_lintercept)/previous_lslope))
    l_x2_lr = int(round((l_y2-previous_lintercept)/previous_lslope))
    # right line: same vertical extent
    r_y1 = int(round(y))
    r_y2 = int(round(y*0.6))
    r_x1_lr = int(round((r_y1-previous_rintercept)/previous_rslope))
    r_x2_lr = int(round((r_y2-previous_rintercept)/previous_rslope))
    # draw the extrapolated lines onto the image
    cv2.line(img, (l_x1_lr, l_y1), (l_x2_lr, l_y2), color, thickness)
    cv2.line(img, (r_x1_lr, r_y1), (r_x2_lr, r_y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform and draw the fitted lane lines.

    `img` should be the output of a Canny transform (binary edge image).
    Returns a new black RGB image of the same size with the extrapolated
    lane lines drawn on it (see draw_lines).
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """Blend the annotated image onto the original frame.

    `img` is the output of hough_lines(): a black image with lines drawn on
    it.  `initial_img` is the unprocessed frame.  The result is computed as
    initial_img * α + img * β + λ, which makes the lines semi-transparent.

    NOTE: initial_img and img must have the same shape.
    """
    return cv2.addWeighted(initial_img, α, img, β, λ)
"""
Explanation: Ideas for Lane Detection Pipeline
Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:
cv2.inRange() for color selection
cv2.fillPoly() for regions selection
cv2.line() to draw lines on an image given endpoints
cv2.addWeighted() to coadd / overlay two images
cv2.cvtColor() to grayscale or change color
cv2.imwrite() to output images to file
cv2.bitwise_and() to apply a mask to an image
Check out the OpenCV documentation to learn about these and discover even more awesome functionality!
Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
End of explanation
"""
import os
os.listdir("test_images/")
"""
Explanation: Test Images
Build your pipeline to work on the images in the directory "test_images"
You should make sure your pipeline works well on these images before you try the videos.
End of explanation
"""
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images directory.
def showImage(img,cmap=None):
    """Display `img` in a fresh matplotlib figure (optionally with a colormap)."""
    # create a new figure so successive calls do not overwrite each other
    plt.figure()
    # render the image
    plt.imshow(img,cmap=cmap)
def detectLaneLines(image):
    """Annotate an RGB frame with the detected left/right lane lines."""
    height, width = image.shape[0], image.shape[1]
    # Grayscale -> Gaussian blur -> Canny edge detection.
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    edges = canny(gaussian_blur(gray, 3), 50, 150)
    # Trapezoidal region of interest covering the road ahead (lower ~40%).
    roi = np.array([[(width*0.15, height),
                     (width*0.45, height*0.6),
                     (width*0.55, height*0.6),
                     (width*0.85, height)]], dtype=np.int32)
    masked = region_of_interest(edges, roi)
    # Probabilistic Hough transform parameters:
    #   rho = 1 px and theta = 1 degree (grid resolution),
    #   threshold = 10 votes, min line length = 3 px, max gap = 3 px.
    annotated = hough_lines(masked, 1, (np.pi/180)*1, 10, 3, 3)
    # Overlay the detected lane lines on the original frame.
    return weighted_img(annotated, image)
# read a sample image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
# show detected lanes in the sample
showImage(detectLaneLines(image),cmap='gray')
"""
Explanation: Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Make copies into the test_images_output directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
End of explanation
"""
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    """Per-frame callback for moviepy: return the frame with lane lines drawn.

    NOTE: the returned image must be a color (3-channel) image for the
    video processing below.
    """
    return detectLaneLines(image)
"""
Explanation: Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
solidWhiteRight.mp4
solidYellowLeft.mp4
Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.
If you get an error that looks like this:
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
Follow the instructions in the error message and check out this forum post for more troubleshooting tips across operating systems.
End of explanation
"""
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
"""
Explanation: Let's try the one with the solid white lane on the right first ...
End of explanation
"""
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
"""
Explanation: Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
End of explanation
"""
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
"""
Explanation: Improve the draw_lines() function
At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".
Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.
Now for the one with the solid yellow lane on the left. This one's more tricky!
End of explanation
"""
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
"""
Explanation: Writeup and Submission
If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a link to the writeup template file.
Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
End of explanation
"""
|
sorig/shogun | doc/ipython-notebooks/regression/Regression.ipynb | bsd-3-clause | %pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from cycler import cycler
# import all shogun classes
from shogun import *
# Toy data: 30 noisy samples around the line y = 3x + 2 (x uniform in [0,10]).
slope = 3
X_train = rand(30)*10
y_train = slope*(X_train)+random.randn(30)*2+2
y_true = slope*(X_train)+2
# Test inputs: an even grid over [0, 10] plus the training points themselves.
X_test = concatenate((linspace(0,10, 50),X_train))
# Convert the data to shogun feature/label objects; shogun features expect
# one column per sample, hence the 1 x N reshape.
feats_train = features(X_train.reshape(1,len(X_train)))
feats_test = features(X_test.reshape(1,len(X_test)))
labels_train = RegressionLabels(y_train)
"""
Explanation: Regression Models
By Saurabh Mahindre - <a href="https://github.com/Saurabh7">github.com/Saurabh7</a> as a part of <a href="http://www.google-melange.com/gsoc/project/details/google/gsoc2014/saurabh7/5750085036015616">Google Summer of Code 2014 project</a> mentored by - Heiko Strathmann - <a href="https://github.com/karlnapf">github.com/karlnapf</a> - <a href="http://herrstrathmann.de/">herrstrathmann.de</a>
This notebook demonstrates various regression methods provided in Shogun. Linear models like Least Square regression, Ridge regression, Least Angle regression, etc. and also kernel based methods like Kernel Ridge regression are discussed and applied to toy and real life data.
Introduction
Least Squares regression
Prediction using Least Squares
Training and generating weights
Ridge Regression
Weights and regularization
Least Angle Regression and LASSO
Kernel Ridge Regression
Support Vector Regression
Introduction
Regression is a case of supervised learning where the goal is to learn a mapping from inputs $x\in\mathcal{X}$ to outputs $y\in\mathcal{Y}$, given a labeled set of input-output pairs $\mathcal{D} = {(x_i,y_i)}^{\text N}_{i=1} \subseteq \mathcal{X} \times \mathcal{Y}$. The response variable $y_i$ is continuous in regression analysis. Regression finds applications in many fields like for predicting stock prices or predicting consumption spending, etc. In linear regression, the mapping is a linear (straight-line) equation.
Least Squares regression
A Linear regression model can be defined as $\text y =$ $\bf {w} \cdot \bf{x} $ $+ b$. Here $\text y$ is the predicted value, $\text x$ the independent variable and $\text w$ the so called weights.<br> We aim to find the linear function (line) that best explains the data, i.e. that minimises some measure of deviation to the training data $\mathcal{D}$. One such measure is the sum of squared distances. The Ordinary Least Squares method minimizes the sum of squared distances between the observed responses in the dataset and the responses predicted by the linear approximation.
The distances, called residuals, have to be minimized. This can be represented as:$$E({\bf{w}}) = \sum_{i=1}^N(y_i-{\bf w}\cdot {\bf x}_i)^2$$
One can differentiate with respect to $\bf w$ and equate to zero to determine the $\bf w$ that minimises $E({\bf w})$. This leads to solution of the form:
$${\bf w} = \left(\sum_{i=1}^N{\bf x}i{\bf x}_i^T\right)^{-1}\left(\sum{i=1}^N y_i{\bf x}_i\right)$$
Prediction using Least Squares
Regression using Least squares is demonstrated below on toy data. Shogun provides the tool to do it using CLeastSquaresRegression class. The data is a straight line with lot of noise and having slope 3. Comparing with the mathematical equation above we thus expect $\text w$ to be around 3 for a good prediction. Once the data is converted to Shogun format, we are ready to train the machine. To label the training data CRegressionLabels are used.
End of explanation
"""
# Train ordinary least squares on the toy data; train() computes the
# weight vector w from the normal equations described above.
ls = LeastSquaresRegression(feats_train, labels_train)
ls.train()
# Learned weights -- should be close to the true slope (3) for a good fit.
w = ls.get_real_vector('w')
print('Weights:')
print(w)
"""
Explanation: Training and generating weights
LeastSquaresRegression has to be initialised with the training features and training labels. Once that is done to learn from data we train() it. This also generates the $\text w$ from the general equation described above. To access $\text w$ use get_real_vector('w').
End of explanation
"""
out = ls.apply(feats_test).get_labels()
"""
Explanation: This value of $\text w$ is pretty close to 3, which certifies a pretty good fit for the training data. Now let's apply this trained machine to our test data to get the ouput values.
End of explanation
"""
# Visualise the least-squares fit: left panel overlays training data, predictions
# and the true relationship; middle panel draws the residuals being squared.
figure(figsize=(20,5))
#Regression and true plot
pl1 = subplot(131)
title('Regression')
_ = plot(X_train,labels_train, 'ro')
_ = plot(X_test,out, color='blue')
_ = plot(X_train, y_true, color='green')
# Proxy rectangles used only as legend handles.
p1 = Rectangle((0, 0), 1, 1, fc="r")
p2 = Rectangle((0, 0), 1, 1, fc="b")
p3 = Rectangle((0, 0), 1, 1, fc="g")
pl1.legend((p1, p2, p3), ["Training samples", "Predicted output", "True relationship"], loc=2)
xlabel('Samples (X)', fontsize=12)
ylabel('Response variable (Y)', fontsize=12)
#plot residues
pl2 = subplot(132)
title("Squared error and output")
_ = plot(X_test,out, linewidth=2)
gray()
_ = scatter(X_train,labels_train.get_labels(),c=ones(30) ,cmap=gray(), s=40)
# Draw a vertical red segment per training point from prediction to label.
# NOTE(review): assumes `y_train` (defined in an earlier cell, outside this view)
# aligns with X_test[50:80] -- confirm against the data-generation cell.
for i in range(50,80):
    plot([X_test[i],X_test[i]],[out[i],y_train[i-50]] , linewidth=2, color='red')
p1 = Rectangle((0, 0), 1, 1, fc="r")
p2 = Rectangle((0, 0), 1, 1, fc="b")
pl2.legend((p1, p2), ["Error/residuals to be squared", "Predicted output"], loc=2)
xlabel('Samples (X)', fontsize=12)
ylabel('Response variable (Y)', fontsize=12)
jet()
"""
Explanation: As an aid to visualisation, a plot of the output and also of the residuals is shown. The sum of the squares of these residuals is minimised.
End of explanation
"""
# Ridge regression on the same toy data: train with regularisation constant tau,
# predict on the test features, and plot against the true relationship.
tau = 0.8
rr = LinearRidgeRegression(tau, feats_train, labels_train)
rr.train()
w = rr.get_real_vector('w')  # L2-regularised weight estimate
print(w)
out = rr.apply(feats_test).get_labels()
figure(figsize=(20,5))
#Regression and true plot
pl1 = subplot(131)
title('Ridge Regression')
_ = plot(X_train,labels_train, 'ro')
_ = plot(X_test, out, color='blue')
_ = plot(X_train, y_true, color='green')
# Proxy rectangles used only as legend handles.
p1 = Rectangle((0, 0), 1, 1, fc="r")
p2 = Rectangle((0, 0), 1, 1, fc="b")
p3 = Rectangle((0, 0), 1, 1, fc="g")
pl1.legend((p1, p2, p3), ["Training samples", "Predicted output", "True relationship"], loc=2)
xlabel('Samples (X)', fontsize=12)
ylabel('Response variable (Y)', fontsize=12)
jet()
"""
Explanation: Ridge Regression
The function we choose should not only best fit the training data but also generalise well. If the coefficients/weights are unconstrained, they are susceptible to high variance and overfitting. To control variance, one has to regularize the coefficients i.e. control how large the coefficients grow. This is what is done in Ridge regression which is L2 (sum of squared components of $\bf w$) regularized form of least squares. A penalty is imposed on the size of coefficients. The error to be minimized is:
$$E({\bf{w}}) = \sum_{i=1}^N(y_i-{\bf w}\cdot {\bf x}_i)^2 + \tau||{\bf w}||^2$$
Here $\tau$ imposes a penalty on the weights.</br>
By differentiating the regularised training error and equating to zero, we find the optimal $\bf w$, given by:
$${\bf w} = \left(\tau {\bf I}+ \sum_{i=1}^N{\bf x}_i{\bf x}_i^T\right)^{-1}\left(\sum_{i=1}^N y_i{\bf x}_i\right)$$
Ridge regression can be performed in Shogun using CLinearRidgeRegression class. It takes the regularization constant $\tau$ as an additional argument. Let us see the basic regression example solved using the same.
End of explanation
"""
#Generate Data
def generate_data(N, D):
    """Sample a random linear-regression dataset.

    Draws a random weight vector w, N standard-normal feature rows X,
    and noisy responses y = Xw + noise.

    Parameters:
        N: number of samples.
        D: number of feature dimensions.

    Returns:
        (X, y): X with shape (N, D) and y with shape (1, N), so callers
        can index y[0] to get a flat label vector for RegressionLabels.
    """
    w = randn(D, 1)
    # One standard-normal row per sample (equivalent to the original
    # element-by-element copy loop, without the quadratic indexing).
    X = randn(N, D)
    # Responses: linear signal plus unit-variance Gaussian noise.
    # (The original also called y.reshape(N,) and discarded the result;
    # ndarray.reshape returns a new array, so that line was a no-op.)
    y = dot(X, w) + randn(N, 1)
    return X, y.T
def generate_weights(taus, feats_train, labels_train):
    """Train a ridge model over a range of regularisation constants.

    Mean-centres the training features in place, then trains one
    LinearRidgeRegression per value in `taus`, recording the learned
    weight vector each time.

    Parameters:
        taus: iterable of regularisation constants to sweep over.
        feats_train: Shogun feature object (modified in place by the
            PruneVarSubMean preprocessor).
        labels_train: Shogun RegressionLabels for the training data.

    Returns:
        (weights, rr): list of weight vectors, one per tau, and the
        trained regressor (left configured with the last tau).
    """
    preproc = PruneVarSubMean(True)
    preproc.init(feats_train)
    feats_train.add_preprocessor(preproc)
    feats_train.apply_preprocessor()
    weights = []
    # Initialise with the first tau rather than relying on a module-level
    # `tau` global (the loop below resets the parameter on every pass).
    rr = LinearRidgeRegression(taus[0], feats_train, labels_train)
    #vary regularization
    for t in taus:
        rr.put('tau', t)
        rr.train()
        weights.append(rr.get_w())
    return weights, rr
def plot_regularization(taus, weights):
    """Plot the ridge trace: weight values versus regularisation constant tau (log x-axis)."""
    ax = gca()
    # One distinct colour per weight component.
    ax.set_prop_cycle(cycler('color', ['b', 'r', 'g', 'c', 'k', 'y', 'm']))
    ax.plot(taus, weights, linewidth=2)
    xlabel('Tau', fontsize=12)
    ylabel('Weights', fontsize=12)
    ax.set_xscale('log')
"""
Explanation: Relationship between weights and regularization
The prediction in the basic regression example was similar to that of the least squares one. To actually see ridge regression's forte, we analyse how the weights change along with the regularization constant. Data with slightly higher dimensions is sampled to do this because overfitting is more likely to occur in such data. Here put('tau', tau) method is used to set the necessary parameter.
End of explanation
"""
def xval_results(taus):
    """Return mean 5-fold cross-validation MSE of the ridge model for each tau.

    NOTE(review): relies on module-level `rr`, `feats_train` and
    `labels_train` created earlier -- call generate_weights() first.
    """
    errors = []
    for t in taus:
        rr.put('tau', t)
        splitting_strategy = CrossValidationSplitting(labels_train, 5)
        # evaluation method
        evaluation_criterium = MeanSquaredError()
        # cross-validation instance
        cross_validation = CrossValidation(rr, feats_train, labels_train, splitting_strategy, evaluation_criterium, False)
        cross_validation.put('num_runs', 100)  # average over 100 randomised runs
        result = cross_validation.evaluate()
        result = CrossValidationResult.obtain_from_generic(result)
        errors.append(result.get_mean())
    return errors
"""
Explanation: The mean squared error (MSE) of an estimator measures the average of the squares of the errors. CMeanSquaredError class is used to compute the MSE as :
$$\frac{1}{|L|} \sum_{i=1}^{|L|} (L_i - R_i)^2$$
Here $L$ is the vector of predicted labels and $R$ is the vector of real labels.
We use 5-fold cross-validation to compute MSE and have a look at how MSE varies with regularisation.
End of explanation
"""
# Sweep 500 log-spaced tau values on a 200-sample, 10-dimensional dataset:
# left panel shows the ridge trace, right panel the cross-validated error.
n = 500
taus = logspace(-6, 4, n)
figure(figsize=(20,6))
suptitle('Effect of Regularisation for 10-dimensional data with 200 samples', fontsize=12)
matrix, y = generate_data(200,10)
feats_train = features(matrix.T)  # Shogun expects features as columns
labels_train = RegressionLabels(y[0])
weights, rr = generate_weights(taus, feats_train, labels_train)
errors = xval_results(taus)
p1=subplot(121)
plot_regularization(taus, weights)
p2 = subplot(122)
plot(taus, errors)
p2.set_xscale('log')
xlabel('Tau', fontsize=12)
ylabel('Error', fontsize=12)
jet()
"""
Explanation: Data with dimension: 10 and number of samples: 200 is now sampled.
End of explanation
"""
# Same sweep as above but with only 10 samples of 10 dimensions, where
# overfitting is far more likely and regularisation should matter.
figure(figsize=(20,6))
suptitle('Effect of Regularisation for 10-dimensional data with 10 samples', fontsize=12)
matrix, y = generate_data(10,10)
feats_train = features(matrix.T)
labels_train = RegressionLabels(y[0])
weights, rr = generate_weights(taus, feats_train, labels_train)
errors = xval_results(taus)
p1 = subplot(121)
plot_regularization(taus, weights)
p2 = subplot(122)
plot(taus, errors)
p2.set_xscale('log')
xlabel('Tau', fontsize=12)
ylabel('Error', fontsize=12)
jet()
"""
Explanation: As seen from the plot of errors, regularisation doesn't seem to affect the errors significantly. One interpretation could be that this is because there is less overfitting as we have a large number of samples. For a small sample size compared to the dimensionality, the test set performance may even be poor. The reason for this is that the regression function will fit the noise too much, while the interesting part of the signal is too small. We now generate 10 samples of 10-dimensions to test this.
End of explanation
"""
#sample some data
# Build a 10x10 feature matrix: one uniform row scaled by 1.5, then nine
# standard-normal rows scaled by 0.5, with constant labels of 1.
X=rand(10)*1.5
for i in range(9):
    x=random.standard_normal(10)*0.5
    X=vstack((X, x))
y=ones(10)
feats_train=features(X)
labels_train=RegressionLabels(y)
"""
Explanation: The first plot is the famous ridge trace that is the signature of this technique. The plot is really very straight forward to read. It presents the standardized regression coefficients (weights) on the vertical axis and various values of tau (Regularisation constant) along the horizontal axis. Since the values of tau ($\tau$) span several orders of magnitude, we adopt a logarithmic scale along this axis. As tau is increased, the values of the regression estimates change, often wildly at first. At some point, the coefficients seem to settle down and then gradually drift towards zero. Often the value of tau for which these coefficients are at their stable values is the best one. This should be supported by a low error value for that tau.
Least Angle Regression and LASSO
LASSO (Least Absolute Shrinkage and Selection Operator) is another version of Least Squares regression, which uses a L1-norm of the parameter vector. This intuitively enforces sparse solutions, whereas L2-norm penalties usually result in smooth and dense solutions.
$$ \min \|X^T\beta - y\|^2 + \lambda\|\beta\|_1$$
In Shogun, following equivalent form is solved, where increasing $C$ selects more variables:
$$\min \|X^T\beta - y\|^2 \quad s.t. \|\beta\|_1 \leq C $$
One way to solve this regularized form is by using Least Angle Regression (LARS).
LARS is essentially forward stagewise made fast. LARS can be briefly described as follows.
Start with an empty set.
Select $x_j$ that is most correlated with residuals.
Proceed in the direction of $x_j$ until another variable $x_k$ is equally correlated with residuals.
Choose equiangular direction between $x_j$ and $x_k$.
Proceed until third variable enters the active set, etc.
It should be noticed that instead of making tiny hops in the direction of one variable at a time, LARS makes optimally-sized leaps in optimal directions. These directions are chosen to make equal angles (equal correlations) with each of the variables currently in our set (equiangular).
Shogun provides tools for Least angle regression (LARS) and lasso using CLeastAngleRegression class. As explained in the mathematical formulation, LARS is just like Stepwise Regression but increases the estimated variables in a direction equiangular to each one's correlations with the residual. The working of this is shown below by plotting the LASSO path. Data is generated in a similar way to the previous section.
End of explanation
"""
#Preprocess data
# LARS requires zero-mean, unit-norm features, so apply two preprocessors
# in sequence: mean subtraction, then normalisation to unit norm.
preproc=PruneVarSubMean()
preproc.init(feats_train)
feats_train.add_preprocessor(preproc)
feats_train.apply_preprocessor()
preprocessor=NormOne()
preprocessor.init(feats_train)
feats_train.add_preprocessor(preprocessor)
feats_train.apply_preprocessor()
print("(No. of attributes, No. of samples) of data:")
print(feats_train.get_feature_matrix().shape)
"""
Explanation: CLeastAngleRegression requires the features to be normalized with a zero mean and unit norm. Hence we use two preprocessors: PruneVarSubMean and NormOne.
End of explanation
"""
#Train and generate weights
# Fit LARS and report how many steps (variable additions) the path contains.
la=LeastAngleRegression()
la.put('labels', labels_train)
la.train(feats_train)
size=la.get_path_size()
print ("Size of path is %s" %size)
"""
Explanation: Next we train on the data. Keeping in mind that we had 10 attributes/dimensions in our data, let us have a look at the size of LASSO path which is obtained readily using get_path_size().
End of explanation
"""
#calculate weights
# Collect the weight vector at every step of the LASSO path; s is the
# L1 norm of the weights at each step (the x-axis of the path plot).
weights=[]
for i in range(size):
    weights.append(la.get_w_for_var(i))
s = sum(abs(array(weights)), axis=1)
print ('Max. norm is %s' %s[-1])
figure(figsize(30,7))
#plot 1
ax=subplot(131)
title('Lasso path')
ax.plot(s, weights, linewidth=2)
ymin, ymax = ylim()
# Dashed vertical lines mark the steps where a variable enters the path.
ax.vlines(s[1:-1], ymin, ymax, linestyle='dashed')
xlabel("Norm")
ylabel("weights")
#Restrict norm to half for early termination
la.put('max_l1_norm', s[-1]*0.5)
la.train(feats_train)
size=la.get_path_size()
weights=[]
for i in range(size):
    weights.append(la.get_w_for_var(i))
s = sum(abs(array(weights)), axis=1)
#plot 2
ax2=subplot(132)
title('Lasso path with restricted norm')
ax2.plot(s, weights, linewidth=2)
ax2.vlines(s[1:-1], ymin, ymax, linestyle='dashed')
xlabel("Norm")
ylabel("weights")
print ('Restricted norm is %s' %(s[-1]))
"""
Explanation: The weights generated ($\beta_i$) and their norm ($\sum_i|\beta_i|$) change with each step. This is when a new variable is added to path. To get the weights at each of these steps get_w_for_var() method is used. The argument is the index of the variable which should be in the range [0, path_size).
End of explanation
"""
# Kernel ridge regression on the Boston Housing dataset, using only the
# per-capita crime rate attribute (row 0 of the feature matrix).
feats = features(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'uci/housing/fm_housing.dat')))
train_labels = RegressionLabels(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'uci/housing/housing_label.dat')))
mat = feats.get_real_matrix('feature_matrix')
crime_rate = mat[0]
feats_train = RealFeatures(crime_rate.reshape(1, len(mat[0])))
# Rescale the single attribute into a comparable range.
preproc = RescaleFeatures()
preproc.init(feats_train)
feats_train.add_preprocessor(preproc)
feats_train.apply_preprocessor(True)
# Store preprocessed feature matrix.
preproc_data = feats_train.get_feature_matrix()
size=500
x1=linspace(0, 1, size)  # evaluation grid for the fitted curve
width=0.5
tau=0.5
kernel=GaussianKernel(feats_train, feats_train, width)
krr=KernelRidgeRegression(tau, kernel, train_labels)
krr.train(feats_train)
feats_test=features(x1.reshape(1,len(x1)))
kernel.init(feats_train, feats_test)
out = krr.apply().get_labels()
#Visualization of regression
fig=figure(figsize(6,6))
#first plot with only one attribute
title("Regression with 1st attribute")
_=scatter(preproc_data[0:], train_labels.get_labels(), c=ones(506), cmap=gray(), s=20)
_=xlabel('Crime rate')
_=ylabel('Median value of homes')
_=plot(x1,out, linewidth=3)
"""
Explanation: Each color in the plot represents a coefficient and the vertical lines denote steps. It is clear that the weights are piecewise linear function of the norm.
Kernel Ridge Regression
Kernel ridge regression (KRR) is a kernel-based regularized form of regression. The dual form of Ridge regression can be shown to be:
$${\bf \alpha}=\left({\bf X}^T{\bf X}+\tau{\bf I}\right)^{-1}{\bf y} \quad \quad(1)$$
It can be seen that the equation to compute $\alpha$ only contains the vectors $\bf X$ in inner products with each other. If a non-linear mapping
$\Phi : x \rightarrow \Phi(x) \in \mathcal F$ is used, the equation can be defined in terms of inner products $\Phi(x)^T \Phi(x)$ instead. We can then use the kernel trick where a kernel function, which can be evaluated efficiently, is chosen $K({\bf x_i, x_j})=\Phi({\bf x_i})\Phi({\bf x_j})$. This is done because it is sufficient to know these inner products only, instead of the actual vectors $\bf x_i$. Linear regression methods like above discussed Ridge Regression can then be carried out in the feature space by using a kernel function representing a non-linear map which amounts to nonlinear regression in original input space.
KRR can be performed in Shogun using CKernelRidgeRegression class. Let us apply it on a non linear regression problem from the Boston Housing Dataset, where the task is to predict prices of houses by finding a relationship with the various attributes provided. The per capita crime rate attribute is used in this particular example.
End of explanation
"""
# Use different kernels
# Build three kernels to compare SVR fits with; the SVR machine is
# constructed once and its kernel swapped per fit in svr_regress().
gaussian_kernel=GaussianKernel(feats_train, feats_train, 0.1)
#Polynomial kernel of degree 2
poly_kernel=PolyKernel(feats_train, feats_train, 2, True)
linear_kernel=LinearKernel(feats_train, feats_train)
kernels=[linear_kernel, poly_kernel, gaussian_kernel]
svr_param=1  # epsilon-tube width for epsilon-SVR
svr_C=10     # regularisation constant
svr=LibSVR(svr_C, svr_param, gaussian_kernel, train_labels, LIBSVR_EPSILON_SVR)
#Visualization of regression
x1=linspace(0, 1, size)
feats_test_=features(x1.reshape(1,len(x1)))
def svr_regress(kernels):
    """Train the shared LibSVR machine once per kernel and overlay all fits.

    Uses the module-level `svr`, `feats_test_`, `preproc_data`,
    `train_labels` and `x1` created in the previous cell.

    NOTE(review): `kernels` is [linear, poly, gaussian] but the legend
    labels start with "Gaussian Kernel" -- the labels appear to mismatch
    the plot order; confirm against the rendered figure.
    """
    fig=figure(figsize(8,8))
    for i, kernel in enumerate(kernels):
        svr.put('kernel', kernel)
        svr.train()
        out=svr.apply(feats_test_).get_labels()
        #subplot(1,len(kernels), i)
        #first plot with only one attribute
        title("Support Vector Regression")
        _=scatter(preproc_data[0:], train_labels.get_labels(), c=ones(506), cmap=gray(), s=20)
        _=xlabel('Crime rate')
        _=ylabel('Median value of homes')
        _=plot(x1,out, linewidth=3)
        ylim([0, 40])
    # Proxy rectangles used only as legend handles.
    p1 = Rectangle((0, 0), 1, 1, fc="r")
    p2 = Rectangle((0, 0), 1, 1, fc="b")
    p3 = Rectangle((0, 0), 1, 1, fc="g")
    _=legend((p1, p2, p3), ["Gaussian Kernel", "Linear Kernel", "Polynomial Kernel"], loc=1)
svr_regress(kernels)
"""
Explanation: As seen from the example KRR (using the kernel trick) can apply techniques for linear regression in the feature space to perform nonlinear regression in the input space.
Support Vector Regression
In Kernel Ridge Regression $(1)$ we have seen the result to be a dense solution. Thus all training examples are active which limits its usage to fewer number of training examples. Support Vector Regression (SVR) uses the concept of support vectors as in Support Vector Machines that leads to a sparse solution. In the SVM the penalty was paid for being on the wrong side of the discriminating plane. Here we do the same thing: we introduce a penalty for being far away from predicted line, but once you are close enough, i.e. in some “epsilon-tube” around this line, there is no penalty.
We are given a labeled set of input-output pairs $\mathcal{D}=(x_i,y_i)^N_{i=1}\subseteq \mathcal{X} \times \mathcal{Y}$ where $x\in\mathcal{X}$ and $y\in \mathcal{Y}$ and the primary problem is as follows:
$$\arg\min_{\mathbf{w},\mathbf{\xi}, b } \left(\frac{1}{2} \|\mathbf{w}\|^2 + C \sum_{i=1}^n (\xi_i + {\xi_i}^*)\right)$$
For the constraints:
$$ {\bf w}^T{\bf x}_i+b-c_i-\xi_i\leq 0, \, \forall i=1\dots N$$
$$ -{\bf w}^T{\bf x}_i-b-c_i^*-\xi_i^*\leq 0, \, \forall i=1\dots N $$
with $c_i=y_i+ \epsilon$ and $c_i^*=-y_i+ \epsilon$
The resulting dual optimization problem is:
$$ \max_{{\bf \alpha},{\bf \alpha}^*} -\frac{1}{2}\sum_{i,j=1}^N(\alpha_i-\alpha_i^*)(\alpha_j-\alpha_j^*) {\bf x}_i^T {\bf x}_j-\sum_{i=1}^N(\alpha_i+\alpha_i^*)\epsilon - \sum_{i=1}^N(\alpha_i-\alpha_i^*)y_i $$ $$ \mbox{wrt}: $$
$${\bf \alpha},{\bf \alpha}^*\in{\bf R}^N\ \mbox{s.t.}: 0\leq \alpha_i,\alpha_i^*\leq C,\, \forall i=1\dots N\ \sum_{i=1}^N(\alpha_i-\alpha_i^*)y_i=0 $$
This class also supports the $\nu$-SVR regression version of the problem, where $\nu$ replaces the $\epsilon$ parameter and represents an upper bound on the fraction of margin errors and a lower bound on the fraction of support vectors. The resulting problem generally takes a bit longer to solve. The details and comparison of these two versions can be found in [1].
Let us try regression using Shogun's LibSVR. The dataset from last section is used. The svr_param argument is the $\epsilon$-tube for the $\epsilon$ version and is the $\nu$ parameter in other case.
End of explanation
"""
import time
# Timing comparison setup: one shared Gaussian kernel over all attributes,
# and matched parameter lists for nu-SVR and epsilon-SVR.
gaussian_kernel=GaussianKernel(feats, feats, 13)
nus=[0.2, 0.4, 0.6, 0.8]
epsilons=[0.16, 0.09, 0.046, 0.0188]
svr_C=10
def compare_svr(nus, epsilons):
    """Compare training times of epsilon-SVR versus nu-SVR on the same kernel.

    Trains LibSVR once per value in `epsilons` (epsilon-SVR mode) and once
    per value in `nus` (nu-SVR mode), timing each fit, then prints a
    fixed-width comparison table.  Assumes len(nus) == len(epsilons).
    Uses the module-level `svr_C`, `gaussian_kernel` and `train_labels`.
    """
    time_eps=[]
    time_nus=[]
    for eps in epsilons:
        svr=LibSVR(svr_C, eps, gaussian_kernel, train_labels, LIBSVR_EPSILON_SVR)
        # time.perf_counter() replaces time.clock(), which was removed in Python 3.8.
        t_start=time.perf_counter()
        svr.train()
        time_eps.append(time.perf_counter() - t_start)
    for nu in nus:
        svr=LibSVR(svr_C, nu, gaussian_kernel, train_labels, LIBSVR_NU_SVR)
        t_start=time.perf_counter()
        svr.train()
        time_nus.append(time.perf_counter() - t_start)
    # Render the comparison table.
    print("-"*72 )
    print("|", "%15s" % 'Nu' ,"|", "%15s" % 'Epsilon',"|","%15s" % 'Time (Nu)' ,"|", "%15s" % 'Time(Epsilon)' ,"|")
    for i in range(len(nus)):
        print( "-"*72 )
        print( "|", "%15s" % nus[i] ,"|", "%15s" %epsilons[i],"|","%15s" %time_nus[i] ,"|", "%15s" %time_eps[i] ,"|" )
    print("-"*72 )
title_='SVR Performance on Boston Housing dataset'
print("%50s" %title_)
compare_svr(nus, epsilons)
"""
Explanation: Let us do a comparison of the time taken for the 2 different models, similar to that done in section 6 of [1]. The Boston Housing Dataset is used.
End of explanation
"""
|
eds-uga/cbio4835-sp17 | lectures/Lecture27.ipynb | mit | import os
os.system("curl www.cnn.com -o cnn.html")
"""
Explanation: Lecture 27: Process control, multiprocessing, and fast code
CBIO (CSCI) 4835/6835: Introduction to Computational Biology
Overview and Objectives
As a final lecture, we'll go over how to extend the reach of your Python code beyond the confines of the script itself and interact with the computer and other programs. Additionally, we'll look at ways of speeding up your Python code. By the end of this lecture, you should be able to:
Implement entire pipelines of Python and external programs, using Python as the "glue"
Use subprocess to invoke arbitrary external programs
Reroute input and output to go through Python, and understand the issues that arise
Explain the Dining Philosophers problem in the context of multiprocessing
Use the multiprocessing and joblib libraries to write embarrassingly parallel code
Use the numba library to speed up computation-heavy code
Part 1: Going outside the [Python] box
Python has lots of tools and packages to help you perform whatever analysis or function you want to do.
But sometimes you need to integrate with programs that don't have a Python interface.
Or maybe you just love the command prompt that much.
Especially in computational biology, there are frequent examples of needing to interface with programs outside of Python.
Running molecular dynamics simulations
Retrieving digital information from a database
Moving files and folders around a computer
Invoking some external, specialized program to run an analysis, then using Python to synthesize the results
Python has a versatile subprocess module for calling and interacting with other programs.
However, first the venerable system command:
End of explanation
"""
# Read back the downloaded page to show the output went to the file,
# not to Python.  (Lecture snippet: the file handle is left open here.)
f = open('cnn.html')
len(f.read())
"""
Explanation: For simple commands, this is great. But where it quickly wears out its welcome is how it handles what comes back from the commands: the return value of the system command is the exit code, not what is printed to screen.
End of explanation
"""
import subprocess
# Basic subprocess.run usage: the command and its arguments form a list.
subprocess.run(["ls"])
subprocess.run(["touch", "test.txt"])
# NOTE(review): without shell=True, ">>" is passed as a literal argument to
# echo -- no shell redirection happens here.
subprocess.run(["echo", "something", ">>", "test.txt"])
subprocess.run(["cat", "test.txt"])
"""
Explanation: What exit code indicates success?
-1
0
1
empty string
subprocess
The subprocess module replaces the following modules (so don't use them):
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Think of subprocess as a more powerful version of all these.
The most basic function in the subprocess module is run.
The first and only required argument is a list: it's the command-line command and all its arguments.
Remember commands like ls? cd? pwd? Think of these commands as functions--they can also have arguments.
If you don't want to give any arguments, you can give subprocess.run a list with one element: just the command you want to run.
If you do provide arguments, you give subprocess.run a list with multiple elements, where the first element is the command to run, and the subsequent elements are the arguments to that command.
Examples
Let's see some examples, shall we?
End of explanation
"""
subprocess.run(['ls','file with spaces'])
"""
Explanation: If there's some kind of oddity with the command you're trying to run, you'll get a nonzero exit code back.
End of explanation
"""
subprocess.run(['ls', 'file with spaces'], shell = True)
"""
Explanation: What's wrong here?
If the filename really has spaces, you need to "escape" the filename by using shell = True:
End of explanation
"""
subprocess.run(['ls', 'file', 'with', 'spaces'])
"""
Explanation: If you're trying to run ls on three separate files: file, with, and spaces, you need to separate them into distinct list elements:
End of explanation
"""
# Redirect the child's stdout into a file object, then read it back.
f = open('dump','w')
subprocess.run('ls', stdout = f)
f = open('dump','r') #this would be a very inefficient way to get the stdout of a program
print(f.readlines())
# Merge stderr into stdout so the error message lands in the same file.
f = open('dump','w')
subprocess.run(['ls','nonexistantfile'], stdout = f, stderr = subprocess.STDOUT) #you can redirect stderr to stdout
print(open('dump').read())
"""
Explanation: This is all well and good--I can run commands and see whether or not they worked--but usually when you run external programs, it's to generate some kind of output that you'll then want Python to use.
How do we access this output?
First, we'll need to introduce a new concept to all programming languages: input and output streams.
Standard streams
Otherwise known as "standard input", "standard output", and "standard error", or in programming parlance:
stdin - standard input is usually from the keyboard
stdout - standard output, usually from print() statements
stderr - standard error, usually from errors
stdin, stdout, and stderr specify the executed program's standard input, standard output and standard error file handles, respectively.
We have to redirect these streams within subprocess so we can see them from inside Python.
Redirecting to files
End of explanation
"""
# Popen with stdout=PIPE exposes the child's output as a file-like object.
proc = subprocess.Popen(["ls"], stdout = subprocess.PIPE)
print(proc.stdout.readlines())
# cwd sets the working directory the command runs in.
proc = subprocess.Popen(['ls'], stdout = subprocess.PIPE, cwd = "/Users/squinn")
print(proc.stdout.readlines())
"""
Explanation: Yes, that's the error message from the command line. Rather than showing up on the command line, we've captured stderr to be the stdout of the Python subprocess, and then redirected the stdout to the file. Hence, the error message is in the file!
What can the targets of the stdout and stderr arguments be? Valid targets, as we've seen, include
an existing file object, created with open()
nothing at all, in which case the program will default to the existing stdin/stdout/stderr
subprocess.PIPE, which enables communication directly between your script and the program (BE CAREFUL WITH THIS)
subprocess.Popen
All the previous functions are just convenience wrappers around the Popen object.
In addition to accepting the command and its arguments as a list, and the stdout and stderr optional arguments, Popen includes the cwd argument, which sets the working directory of the process, or defaults to the current working directory of the Python script.
End of explanation
"""
# The pipe behaves like a regular file object: readline() and readlines() both work.
proc = subprocess.Popen(['ls'], stdout = subprocess.PIPE)
print(proc.stdout.readline())
print(proc.stdout.readline())
for elem in proc.stdout.readlines():
    print(elem)
"""
Explanation: subprocess.PIPE
So what is this mysterious PIPE attribute?
"Pipes" are a common operating system term for avenues of communication between different programs. In this case, a pipe is established between your Python program and whatever program you're running through subprocess.
If stdout/stdin/stderr is set to subprocess.PIPE, then that input/output stream of the external program is accessible through a file object in the returned object.
It's a regular ol' file object, so you have access to all the Python file object functions you know and love:
End of explanation
"""
proc = subprocess.Popen('cat', stdin = subprocess.PIPE, stdout = subprocess.PIPE)
"""
Explanation: Core Popen functions
Popen.poll() - check to see if process has terminated
Popen.wait() - wait for process to terminate (basically, ask your Python program to hang until the command is finished)
Popen.terminate() - terminate the process (ask nicely)
Popen.kill() - kill the process with extreme prejudice
End of explanation
"""
# Pipes carry bytes, not str; closing stdin signals EOF so cat can finish.
proc.stdin.write(bytes("Hello", encoding = 'utf-8'))
proc.stdin.close()
"""
Explanation: We've created two pipes between our Python program and the cat command--an input pipe to stdin, and an output pipe from stdout.
End of explanation
"""
print(proc.stdout.read())
"""
Explanation: Here, we've written some raw bytes to the input--to the cat program, this looks like it's coming from the keyboard!
End of explanation
"""
import multiprocessing
def f(x):
    """Square the input (worker function mapped over the process pool)."""
    return x * x
# Fan the squaring function out over 4 worker processes; map() preserves order.
pool = multiprocessing.Pool(processes = 4)
numbers_to_evaluate = range(20)
print(pool.map(f, numbers_to_evaluate))
"""
Explanation: Now we're reading the output of the cat program!
What would happen if in the previous code we omitted the proc.stdin.close() call?
Nothing would change
It would not print anything
It would print Hello after a pause
It would hang
Warning!
Managing simultaneous input and output is tricky and can easily lead to deadlocks.
For example, your script may be blocked waiting for output from the process which is blocked waiting for input.
<img src="http://i.stack.imgur.com/ezUGt.jpg">
The Dining Philosophers
<img src="http://adit.io/imgs/dining_philosophers/at_the_table.png" />
( https://en.wikipedia.org/wiki/Dining_philosophers_problem )
Part 2: Multitasking and parallal programming
There are several parallel programming models enabled by a variety of hardware (multicore, cloud computing, supercomputers, GPU).
<img src='http://images.anandtech.com/reviews/cpu/intel/SNBE/Core_I7_LGA_2011_Die.jpg'>
Threads vs. Processes
A thread of execution is the smallest sequence of programmed instructions that can be managed independently by an operating system scheduler.
A process is an instance of a computer program.
<center><img src='http://upload.wikimedia.org/wikipedia/commons/a/a5/Multithreaded_process.svg'></center>
The long and short of threads versus processes in Python is...
...always use processes.
Blah blah blah Global Interpreter Lock, blah blah blah only 1 thread allowed per Python process, blah blah blah just use multiprocessing.
Multiple simultaneous processes
Or multiprocessing.
This is the concept of parallel programming, or having your program do multiple things at the very same time.
With the rising popularity of multi-core computers, most computers these days can easily handle at least 4 parallel processes at once. Why not take advantage of that?
However, writing correct, high performance parallel code can be difficult: look no further than the Dining Philosophers problem.
...but in some cases, it's trivial.
A problem is embarassingly parallel if it can be easily separated into independent subtasks, each of which does a substantial amount of computation.
Fortunately, this happens a lot!
Apply this filter to 1000 images
Process these 5000 protein structures
Compute RMSDs of all frames in a trajectory
In cases like these, using Pools will get you a significant speedup (by however many cores you have).
Pools
multiprocessing supports the concept of a pool of workers. You initialize with the number of processes you want to run in parallel (the default is the number of CPUs on your system) and they are available for doing parallel work:
Then (and this is the somewhat-tricky part), you call map() and pass in two arguments:
- the function you want to call in parallel, and
- the values you want the function to evaluate in parallel
Clear as mud? Let's see an example.
Pool Example
End of explanation
"""
import multiprocessing
def chatty(conn):
    """Echo handler for one end of a multiprocessing.Pipe.

    Blocks until a message arrives on `conn`, then sends back an
    acknowledgement quoting the received text.
    """
    received = conn.recv()
    reply = "you sent me '" + received + "'"
    conn.send(reply)
# Create the two ends of the pipe.
(c1,c2) = multiprocessing.Pipe()
# Spin off the process that runs the "Chatty" function.
p1 = multiprocessing.Process(target = chatty, args = (c2, ))
p1.start()
# Send a message to the process, and receive its response.
c1.send("Hello!")
result = c1.recv()
p1.join()  # wait for the child process to exit before printing
print(result)
"""
Explanation: Inter-process communication
While 90% of the work you'll likely do is embarrassingly parallel, some multiprocessing can't be done just with Pools.
Or, perhaps, you'll need to be able to communicate intermediate results between processes.
We can do this through queues and pipes.
Queues
multiprocessing.Queue provides a simple first-in-first-out messaging queue between Python processes.
put: put an element on the queue. This will block if the queue has filled up
get: get an element from the queue. This will block if the queue is empty.
Queues are great if all you want to do is basically "report" on the progress of a process. The process puts in updates, e.g. "20% finished", and the main Python script gets these updates and prints them out to you.
Pipes
A pipe is a communication channel between processes that can send and receive messages.
Pipe() return a tuple of Connection objects representing the ends of the pipe. Each connection object has the following methods:
send: sends data to other end of the pipe
recv: waits for data from other end of the pipe (unless pipe closed, then EOFError)
close: close the pipe
Unlike queues, pipes can support two-way communication between processes.
End of explanation
"""
import numpy as np
def identity(value):
    """Return |value|, computed the deliberately slow way: sqrt(value**2)."""
    squared = value ** 2
    return np.sqrt(squared)
# Now do some computation.
# Serial baseline: apply identity() to 100,000 values one after another.
# Each iteration is independent of the others -- embarrassingly parallel.
array = range(100000)
retval = []
for i in array:
    retval.append(identity(i))
"""
Explanation: joblib
joblib is a wonderful package that uses multiprocessing on the backend, but simplifies things greatly by removing a lot of boilerplate.
The most likely source of duplicated effort is in loops.
End of explanation
"""
from joblib import Parallel, delayed
retval = Parallel(n_jobs = 8, verbose = 1)(delayed(identity)(i) for i in array)
"""
Explanation: An important observation: no specific value of the array depends on any other. This means it doesn't matter the order in which these computations are performed. Plus it takes forever to run this on 100,000 numbers, one after another.
So why not perform them at the same time?
With multiprocessing, we had to set up a Pool and a map. Not with joblib:
End of explanation
"""
def frob(matrix):
    """Frobenius norm of a 2-D matrix via an explicit double loop.

    Deliberately written element-by-element (instead of vectorized) to serve
    as the slow baseline for the numba comparison later in this notebook.
    """
    n_rows, n_cols = matrix.shape[0], matrix.shape[1]
    total = 0.0
    for r in range(n_rows):
        for c in range(n_cols):
            total += matrix[r, c] ** 2
    return np.sqrt(total)
"""
Explanation: This is a bit tricky at first, but I promise it's more straightforward than multiprocessing.
Let's take the code bit by bit.
retval = Parallel(n_jobs = 8, verbose = 1)(delayed(identity)(i) for i in array)
Parallel(n_jobs = 8, verbose = 1): This sets up the parallel computation, specifying we want to use 8 separate processes. The verbose is just a logging argument--the higher the number, the more debugging output it spits out.
delayed(identity): This is a little syntax trick by joblib, but basically: whatever function you want to run in parallel, you pass in to the delayed() function.
(i): This is the argument you want to pass to the function you're running in parallel.
for i in array: and this is the loop through the data you want to process in parallel.
All the same pieces are there as in multiprocessing!
joblib just streamlines the process by assuming loops are your primary source of repetition (a good assumption), so it bakes its machinery into the loop structure.
Anytime you do parameter scans, data point preprocessing, anything that is "embarrassingly parallel" or that would use multiprocessing.Pool, use joblib instead.
Part 3: Extreme Python
Just a quick look at one of the more cutting-edge Python packages: numba
Let's say you're trying to compute the Frobenius norm on an alignment matrix from a molecular dynamics simulation.
(that's just a fancy way of saying "element-wise Euclidean distance")
End of explanation
"""
import numpy as np
x1 = np.random.random((10, 10)) # A 10x10 random matrix
f1 = frob(x1)
print(f1)
"""
Explanation: Let's see how it works.
End of explanation
"""
%timeit frob(x1)
"""
Explanation: Cool. Seems to have worked reasonably well. Out of sheer curiosity, how long did that take to run?
End of explanation
"""
x2 = np.random.random((100, 100)) # A 100x100 random matrix!
f2 = frob(x2)
print(f2)
%timeit frob(x2)
"""
Explanation: Not bad. $10^{-6}$ seconds per run.
How well does it scale if the matrix is an order of magnitude larger?
End of explanation
"""
x3 = np.random.random((1000, 1000)) # Yikes
f3 = frob(x3)
print(f3)
%timeit frob(x3)
"""
Explanation: Yikes--an order of magnitude in data size, but two orders of magnitude in runtime increase.
Let's try one more data size increase. I have a bad feeling about this...
End of explanation
"""
from numba import jit
# Same element-wise Frobenius norm as `frob` above, but JIT-compiled:
# the @jit decorator makes numba compile the Python loops to native code
# the first time the function is called.
@jit
def frob2(matrix):
    # Deliberately identical to `frob` -- only the decorator differs.
    rows = matrix.shape[0]
    cols = matrix.shape[1]
    frob_norm = 0.0
    for i in range(rows):
        for j in range(cols):
            frob_norm += matrix[i, j] ** 2
    return np.sqrt(frob_norm)
"""
Explanation: Another order of magnitude on the data, another two orders of magnitude on the runtime. Clearly not a good trend. Maybe a quadratic trend, in fact?
Point being, this code doesn't scale. At all.
Of course, the problem lies in the fact that you could be using NumPy array broadcasting. But let's say you didn't know about it.
Or, much more likely, it's a very small part of a much larger scientific program--complete with subprocesses and multiprocessing--and it's going to be tough to isolate a single part and optimize it.
"Just-in-time" compilation to the rescue!
End of explanation
"""
%timeit frob(x3)
%timeit frob2(x3)
"""
Explanation: I promise--other than the @jit decorator on top of the function definition, the code for frob2 is identical to that of frob.
Let's test this out on the third and largest test data!
End of explanation
"""
def frob3(matrix):
    """Frobenius norm using NumPy broadcasting (fully vectorized)."""
    total = (matrix ** 2).sum()
    return np.sqrt(total)
%timeit frob3(x3)
"""
Explanation: Woo! Got our three orders of magnitude back!
For the sake of completeness, let's see how this compares to a full NumPy array broadcasting version.
End of explanation
"""
|
tudarmstadt-lt/taxi | distributional_semantics.ipynb | apache-2.0 | def display_taxonomy(graph):
""" Display the taxonomy in a hierarchical layout """
pos = graphviz_layout(graph, prog='dot', args="-Grankdir=LR")
plt.figure(3,figsize=(48,144))
nx.draw(graph, pos, with_labels=True, arrows=True)
plt.show()
# Construct the networkx graph
# Construct the networkx graph
def process_input(taxonomy):
    """Read a taxonomy TSV file and build a directed hypernym -> hyponym graph."""
    frame = pd.read_csv(
        taxonomy,
        sep='\t',
        header=None,
        names=['hyponym', 'hypernym'],
        usecols=[1, 2],
    )
    graph = nx.DiGraph()
    for hyper, hypo in zip(list(frame['hypernym']), list(frame['hyponym'])):
        # Simplify compound words by replacing their whitespace with underscores.
        if ' ' in hyper:
            hyper = '_'.join(hyper.split())
        if ' ' in hypo:
            hypo = '_'.join(hypo.split())
        graph.add_edge(hyper, hypo)
    return graph
taxo_path = 'taxi_output/simple_full/science/science_en.csv-relations.csv-taxo-knn1.csv'
gs_path = 'eval/taxi_eval_archive/gold_standard/science.taxo'
G_taxo = process_input(taxo_path)
G_gold = process_input(gs_path)
print('Nodes in GS:', len(set(G_gold.nodes())))
print('Nodes in G Taxo:', len(set(G_taxo.nodes())))
new_nodes = set(G_gold.nodes()) - set(G_taxo.nodes())
len(new_nodes)
"""
Explanation: Construct the Networkx graph
From a csv file
End of explanation
"""
def load_vectors():
    """Load the pre-trained word embeddings.

    Returns:
        tuple: (poincare_model, own_model), where the Poincare embeddings
        model the parent-cluster relationship and the word2vec embeddings
        model the family-cluster relationship.
    """
    embedding_dir = '/home/5aly/taxi/distributed_semantics/embeddings/'
    # parent-cluster relationship (fixed: dropped the stray `model =` alias
    # that leaked an unused extra name)
    poincare_model = PoincareModel.load(embedding_dir + 'embeddings_poincare_wordnet')
    own_model = gensim.models.KeyedVectors.load(embedding_dir + 'own_embeddings_w2v')  # family-cluster relationship
    return poincare_model, own_model
poincare_w2v, own_w2v = load_vectors()
"""
Explanation: Load Word Vectors
End of explanation
"""
def create_children_clusters(own_model, graph):
    """Build, for every node, an undirected graph over its children.

    Two children of the same node are connected when one appears among the
    top-100 most-similar words of the other in `own_model`.
    """
    clustered_graph = {}
    for parent in graph.nodes():
        clustered_graph[parent] = nx.Graph()
        children = [child.lower() for child in graph.successors(parent)]
        for child in children:
            try:
                for neighbour, _ in own_model.most_similar(child, topn=100):
                    if neighbour.lower() in children:
                        clustered_graph[parent].add_edge(child, neighbour.lower())
            except KeyError:
                # Out-of-vocabulary word: fall back to a substring heuristic.
                if parent in child.split('_'):
                    clustered_graph[parent].add_node(child)
    return clustered_graph
GC = create_children_clusters(own_w2v, G_taxo)
posI = graphviz_layout(GC['engineering'])
# plt.figure(2, figsize=(20, 20))
nx.draw(GC['engineering'], posI, with_labels=True, arrows=True)
plt.show()
"""
Explanation: Improving Taxonomy with Distributional Semantics
Create a networkx graph for each node containing only its children. Draw edges among the children based on the similarity with one another using word vectors.
End of explanation
"""
G_improved = G_taxo.copy()
def calculate_similarity(poincare_model, own_model, parent, family, node, exclude_parent, exclude_family):
    """Score how well `node` fits under `parent` given a `family` cluster.

    The score averages two components:
    - parent similarity: the maximum Poincare similarity between any WordNet
      sense of `node` and any sense of `parent` (skipped if exclude_parent);
    - family similarity: the mean word2vec similarity between `node` and the
      members of `family` (skipped if exclude_family).
    """
    # Similarity between the parent and a cluster
    parent_similarity = 0
    if not exclude_parent:
        # Keep only synsets whose name actually contains the surface form.
        node_senses = [n_sense.name() for n_sense in wn.synsets(node) if node in n_sense.name()]
        parent_senses = [p_sense.name() for p_sense in wn.synsets(parent) if parent in p_sense.name()]
        for parent_sense in parent_senses:
            for node_sense in node_senses:
                try:
                    similarity = poincare_model.kv.similarity(parent_sense, node_sense)
                    if similarity > parent_similarity:
                        parent_similarity = similarity
                except KeyError as e:
                    # If the parent sense itself is missing from the embedding,
                    # no pairing with it can succeed: skip to the next parent
                    # sense. Otherwise only this node sense is missing.
                    if parent_sense in str(e):
                        break
                    else:
                        continue
    # Similarity between a family and a cluster
    family_similarity = 0
    if not exclude_family:
        family_similarities = []
        for f_item in family:
            try:
                family_similarities.append(own_model.similarity(f_item, node))
            except KeyError as e:  # skip the terms not in vocabulary
                # If `node` itself is out of vocabulary every comparison fails,
                # so stop early; otherwise skip just this family member.
                if node in str(e):
                    break
                else:
                    continue
        if len(family_similarities) > 0:
            family_similarity = sum(family_similarities) / len(family_similarities)
    # Final score is the average of both the similarities
    return (parent_similarity + family_similarity) / 2
# Attach every node missing from the generated taxonomy under the parent
# whose child-cluster is most similar to it.
for node in new_nodes:
    max_score = 0
    max_score_node = ''
    for p_node, graph in GC.items():
        # Cluster the children of p_node with Chinese Whispers, then score
        # the orphan node against each resulting family cluster.
        gc = chinese_whispers(graph, weighting='top', iterations=60)
        for label, family in aggregate_clusters(gc).items():
            score = calculate_similarity(poincare_w2v, own_w2v, p_node, family, node, False, False)
            if score > max_score:
                max_score = score
                max_score_node = p_node
    # Add the orphan under the best-matching parent.
    G_improved.add_edge(max_score_node, node)
"""
Explanation: Implementing Chinese Whispers Algorithm
Adding new nodes
Loop through all the new nodes.
For each removed node, find out the family and parent in the graph that has the maximum similarity with it.
End of explanation
"""
def tune_result(g_improved):
    """Clean the taxonomy: drop blank labels, re-attach or remove isolated nodes."""
    print('\nTuning the result...')
    # An empty label is an artifact of the pipeline; remove it first.
    if '' in g_improved.nodes():
        g_improved.remove_node('')
    parents = {edge[0] for edge in g_improved.edges()}
    for orphan in list(nx.isolates(g_improved)):
        parts = orphan.split('_')
        # Try to re-attach a compound term under its head or modifier word.
        if parts[-1] in parents:
            g_improved.add_edge(parts[-1], orphan)
        elif parts[0] in parents:
            g_improved.add_edge(parts[0], orphan)
        else:
            g_improved.remove_node(orphan)
    return g_improved
tune_result(G_improved)
print('Tuned.')
"""
Explanation: Tuning the nodes and the edges
End of explanation
"""
def save_result(result, path):
    """Write the improved taxonomy edges to a tab-separated file.

    Args:
        result: graph whose edges() yields (hypernym, hyponym) pairs.
        path: path of the original taxonomy file; used to derive the output name.

    Returns:
        str: path of the written file.
    """
    print('\nSaving the result...')
    df_improved = pd.DataFrame(list(result.edges()), columns=['hypernym', 'hyponym'])
    # Swap the columns so the hyponym comes first, matching the input format.
    df_improved = df_improved[df_improved.columns.tolist()[::-1]]
    # Replace the underscores with blanks to restore compound words
    df_improved['hyponym'] = df_improved['hyponym'].apply(lambda x: x.replace('_', ' '))
    df_improved['hypernym'] = df_improved['hypernym'].apply(lambda x: x.replace('_', ' '))
    # Store the result; create the output directory first so to_csv cannot
    # fail with FileNotFoundError on a fresh checkout.
    output_dir = os.path.join('taxi_output', 'distributional_semantics')
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(
        output_dir,
        os.path.basename(path) + '-' + 'new_ds' + os.path.splitext(path)[-1]
    )
    # Note: the row index is written on purpose -- the TAXI format carries a
    # leading id column (the reader uses usecols=[1, 2]).
    df_improved.to_csv(output_path, sep='\t', header=False)
    print('Output saved at:', output_path)
    return output_path
output_path = save_result(G_improved, taxo_path)
"""
Explanation: Save the result
End of explanation
"""
def visualize_clusters(graph):
    """Cluster `graph` with Chinese Whispers and draw the colored result."""
    clustered = chinese_whispers(graph, weighting='top', iterations=60)
    # One color per cluster label (requires matplotlib).
    node_colors = [1. / clustered.node[n]['label'] for n in clustered.nodes()]
    fig = plt.gcf()
    fig.set_size_inches(20, 20)
    nx.draw_networkx(clustered, cmap=plt.get_cmap('jet'), node_color=node_colors, font_color='black')
    plt.show()
GC_improved = create_children_clusters(own_w2v, G_improved)
domain = 'mechanical_engineering'
# Original clusters
visualize_clusters(GC[domain])
# Clusters after detaching
visualize_clusters(GC_detached[domain])
# Clusters after detaching and re-attaching the clusters
visualize_clusters(GC_improved[domain])
"""
Explanation: Results visualization
Clusters
End of explanation
"""
# View the original taxonomy
display_taxonomy(G)
# View the modified taxonomy
display_taxonomy(G_improved)
len(list(G.nodes()))
len(list(G_improved.nodes()))
"""
Explanation: Taxonomy
End of explanation
"""
|
tensorflow/hub | examples/colab/spice.ipynb | apache-2.0 | #@title Copyright 2020 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Explanation: Copyright 2020 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
!sudo apt-get install -q -y timidity libsndfile1
# All the imports to deal with sound data
!pip install pydub numba==0.48 librosa music21
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import matplotlib.pyplot as plt
import librosa
from librosa import display as librosadisplay
import logging
import math
import statistics
import sys
from IPython.display import Audio, Javascript
from scipy.io import wavfile
from base64 import b64decode
import music21
from pydub import AudioSegment
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
print("tensorflow: %s" % tf.__version__)
#print("librosa: %s" % librosa.__version__)
"""
Explanation: <table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/hub/tutorials/spice"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/spice.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/spice.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/spice.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
<td>
<a href="https://tfhub.dev/google/spice/2"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a>
</td>
</table>
Pitch Detection with SPICE
This colab will show you how to use the SPICE model downloaded from TensorFlow Hub.
End of explanation
"""
#@title [Run this] Definition of the JS code to record audio straight from the browser
RECORD = """
const sleep = time => new Promise(resolve => setTimeout(resolve, time))
const b2text = blob => new Promise(resolve => {
const reader = new FileReader()
reader.onloadend = e => resolve(e.srcElement.result)
reader.readAsDataURL(blob)
})
var record = time => new Promise(async resolve => {
stream = await navigator.mediaDevices.getUserMedia({ audio: true })
recorder = new MediaRecorder(stream)
chunks = []
recorder.ondataavailable = e => chunks.push(e.data)
recorder.start()
await sleep(time)
recorder.onstop = async ()=>{
blob = new Blob(chunks)
text = await b2text(blob)
resolve(text)
}
recorder.stop()
})
"""
def record(sec=5):
    """Record `sec` seconds of audio in the browser (Colab only).

    Returns:
        str: the saved wav filename, or '' when not running inside Colab.
    """
    try:
        from google.colab import output
    except ImportError:
        # Fixed message wording (was: "No possible to import ...").
        print('Not possible to import output from google.colab')
        return ''
    else:
        print('Recording')
        # Run the JS recorder defined in RECORD and collect the base64 payload.
        display(Javascript(RECORD))
        s = output.eval_js('record(%d)' % (sec * 1000))
        fname = 'recorded_audio.wav'
        print('Saving to', fname)
        # The payload is a data URL: strip the header and decode the audio bytes.
        b = b64decode(s.split(',')[1])
        with open(fname, 'wb') as f:
            f.write(b)
        return fname
#@title Select how to input your audio { run: "auto" }
INPUT_SOURCE = 'https://storage.googleapis.com/download.tensorflow.org/data/c-scale-metronome.wav' #@param ["https://storage.googleapis.com/download.tensorflow.org/data/c-scale-metronome.wav", "RECORD", "UPLOAD", "./drive/My Drive/YOUR_MUSIC_FILE.wav"] {allow-input: true}
print('You selected', INPUT_SOURCE)
if INPUT_SOURCE == 'RECORD':
uploaded_file_name = record(5)
elif INPUT_SOURCE == 'UPLOAD':
try:
from google.colab import files
except ImportError:
print("ImportError: files from google.colab seems to not be available")
else:
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
uploaded_file_name = next(iter(uploaded))
print('Uploaded file: ' + uploaded_file_name)
elif INPUT_SOURCE.startswith('./drive/'):
try:
from google.colab import drive
except ImportError:
print("ImportError: files from google.colab seems to not be available")
else:
drive.mount('/content/drive')
# don't forget to change the name of the file you
# will you here!
gdrive_audio_file = 'YOUR_MUSIC_FILE.wav'
uploaded_file_name = INPUT_SOURCE
elif INPUT_SOURCE.startswith('http'):
!wget --no-check-certificate 'https://storage.googleapis.com/download.tensorflow.org/data/c-scale-metronome.wav' -O c-scale.wav
uploaded_file_name = 'c-scale.wav'
else:
print('Unrecognized input format!')
print('Please select "RECORD", "UPLOAD", or specify a file hosted on Google Drive or a file from the web to download file to download')
"""
Explanation: The audio input file
Now the hardest part: Record your singing! :)
We provide four methods to obtain an audio file:
Record audio directly in colab
Upload from your computer
Use a file saved on Google Drive
Download the file from the web
Choose one of the four methods below.
End of explanation
"""
# Function that converts the user-created audio to the format that the model
# expects: bitrate 16kHz and only one channel (mono).
EXPECTED_SAMPLE_RATE = 16000
def convert_audio_for_model(user_file, output_file='converted_audio_file.wav'):
    """Convert `user_file` to the format SPICE expects: 16 kHz, mono, wav."""
    sound = AudioSegment.from_file(user_file)
    sound = sound.set_frame_rate(EXPECTED_SAMPLE_RATE).set_channels(1)
    sound.export(output_file, format="wav")
    return output_file
# Converting to the expected format for the model
# in all the input 4 input method before, the uploaded file name is at
# the variable uploaded_file_name
converted_audio_file = convert_audio_for_model(uploaded_file_name)
# Loading audio samples from the wav file:
sample_rate, audio_samples = wavfile.read(converted_audio_file, 'rb')
# Show some basic information about the audio.
duration = len(audio_samples)/sample_rate
print(f'Sample rate: {sample_rate} Hz')
print(f'Total duration: {duration:.2f}s')
print(f'Size of the input: {len(audio_samples)}')
# Let's listen to the wav file.
Audio(audio_samples, rate=sample_rate)
"""
Explanation: Preparing the audio data
Now we have the audio, let's convert it to the expected format and then listen to it!
The SPICE model needs as input an audio file at a sampling rate of 16kHz and with only one channel (mono).
To help you with this part, we created a function (convert_audio_for_model) to convert any wav file you have to the model's expected format:
End of explanation
"""
# We can visualize the audio as a waveform.
_ = plt.plot(audio_samples)
"""
Explanation: First thing, let's take a look at the waveform of our singing.
End of explanation
"""
MAX_ABS_INT16 = 32768.0
def plot_stft(x, sample_rate, show_black_and_white=False):
    """Plot a log-frequency spectrogram (in dB) of the waveform `x`.

    Args:
        x: 1-D float waveform, normalized to [-1, 1].
        sample_rate: sampling rate of `x` in Hz.
        show_black_and_white: use a grayscale colormap when True.
    """
    x_stft = np.abs(librosa.stft(x, n_fft=2048))
    fig, ax = plt.subplots()
    fig.set_size_inches(20, 10)
    # Convert amplitudes to decibels relative to the maximum amplitude.
    x_stft_db = librosa.amplitude_to_db(x_stft, ref=np.max)
    if show_black_and_white:  # idiomatic: no parentheses around the condition
        librosadisplay.specshow(data=x_stft_db, y_axis='log',
                                sr=sample_rate, cmap='gray_r')
    else:
        librosadisplay.specshow(data=x_stft_db, y_axis='log', sr=sample_rate)
    plt.colorbar(format='%+2.0f dB')
plot_stft(audio_samples / MAX_ABS_INT16 , sample_rate=EXPECTED_SAMPLE_RATE)
plt.show()
"""
Explanation: A more informative visualization is the spectrogram, which shows frequencies present over time.
Here, we use a logarithmic frequency scale, to make the singing more clearly visible.
End of explanation
"""
audio_samples = audio_samples / float(MAX_ABS_INT16)
"""
Explanation: We need one last conversion here. The audio samples are in int16 format. They need to be normalized to floats between -1 and 1.
End of explanation
"""
# Loading the SPICE model is easy:
model = hub.load("https://tfhub.dev/google/spice/2")
"""
Explanation: Executing the Model
Now is the easy part, let's load the model with TensorFlow Hub, and feed the audio to it.
SPICE will give us two outputs: pitch and uncertainty
TensorFlow Hub is a library for the publication, discovery, and consumption of reusable parts of machine learning models. It makes it easy to use machine learning to solve your challenges.
To load the model you just need the Hub module and the URL pointing to the model:
End of explanation
"""
# We now feed the audio to the SPICE tf.hub model to obtain pitch and uncertainty outputs as tensors.
model_output = model.signatures["serving_default"](tf.constant(audio_samples, tf.float32))
pitch_outputs = model_output["pitch"]
uncertainty_outputs = model_output["uncertainty"]
# 'Uncertainty' basically means the inverse of confidence.
confidence_outputs = 1.0 - uncertainty_outputs
fig, ax = plt.subplots()
fig.set_size_inches(20, 10)
plt.plot(pitch_outputs, label='pitch')
plt.plot(confidence_outputs, label='confidence')
plt.legend(loc="lower right")
plt.show()
"""
Explanation: Note: An interesting detail here is that all the model urls from Hub can be used for download and also to read the documentation, so if you point your browser to that link you can read documentation on how to use the model and learn more about how it was trained.
With the model loaded, data prepared, we need 3 lines to get the result:
End of explanation
"""
confidence_outputs = list(confidence_outputs)
pitch_outputs = [ float(x) for x in pitch_outputs]
indices = range(len (pitch_outputs))
confident_pitch_outputs = [ (i,p)
for i, p, c in zip(indices, pitch_outputs, confidence_outputs) if c >= 0.9 ]
confident_pitch_outputs_x, confident_pitch_outputs_y = zip(*confident_pitch_outputs)
fig, ax = plt.subplots()
fig.set_size_inches(20, 10)
ax.set_ylim([0, 1])
plt.scatter(confident_pitch_outputs_x, confident_pitch_outputs_y, )
plt.scatter(confident_pitch_outputs_x, confident_pitch_outputs_y, c="r")
plt.show()
"""
Explanation: Let's make the results easier to understand by removing all pitch estimates with low confidence (confidence < 0.9) and plot the remaining ones.
End of explanation
"""
def output2hz(pitch_output):
    """Convert a SPICE pitch output in [0, 1] to an absolute pitch in Hz."""
    # Calibration constants taken from https://tfhub.dev/google/spice/2
    # (trailing semicolons removed; they are not Pythonic).
    PT_OFFSET = 25.58
    PT_SLOPE = 63.07
    FMIN = 10.0
    BINS_PER_OCTAVE = 12.0
    # Map the model output to a CQT bin, then the bin to a frequency.
    cqt_bin = pitch_output * PT_SLOPE + PT_OFFSET
    return FMIN * 2.0 ** (1.0 * cqt_bin / BINS_PER_OCTAVE)
confident_pitch_values_hz = [ output2hz(p) for p in confident_pitch_outputs_y ]
"""
Explanation: The pitch values returned by SPICE are in the range from 0 to 1. Let's convert them to absolute pitch values in Hz.
End of explanation
"""
plot_stft(audio_samples / MAX_ABS_INT16 ,
sample_rate=EXPECTED_SAMPLE_RATE, show_black_and_white=True)
# Note: conveniently, since the plot is in log scale, the pitch outputs
# also get converted to the log scale automatically by matplotlib.
plt.scatter(confident_pitch_outputs_x, confident_pitch_values_hz, c="r")
plt.show()
"""
Explanation: Now, let's see how good the prediction is: We will overlay the predicted pitches over the original spectrogram. To make the pitch predictions more visible, we changed the spectrogram to black and white.
End of explanation
"""
pitch_outputs_and_rests = [
output2hz(p) if c >= 0.9 else 0
for i, p, c in zip(indices, pitch_outputs, confidence_outputs)
]
"""
Explanation: Converting to musical notes
Now that we have the pitch values, let's convert them to notes!
This part is challenging by itself. We have to take into account two things:
1. the rests (when there's no singing)
2. the size of each note (offsets)
1: Adding zeros to the output to indicate when there's no singing
End of explanation
"""
A4 = 440
C0 = A4 * pow(2, -4.75)
note_names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]

def hz2offset(freq):
    """Quantization error (fraction of a semitone) of a single pitch value."""
    if freq == 0:
        # Rests carry no pitch information, hence no quantization error.
        return None
    # Semitones above C0, and the deviation from the nearest whole semitone.
    semitones = 12 * math.log2(freq / C0)
    return semitones - round(semitones)
# The ideal offset is the mean quantization error for all the notes
# (excluding rests):
offsets = [hz2offset(p) for p in pitch_outputs_and_rests if p != 0]
print("offsets: ", offsets)
ideal_offset = statistics.mean(offsets)
print("ideal offset: ", ideal_offset)
"""
Explanation: 2: Adding note offsets
When a person sings freely, the melody may have an offset to the absolute pitch values that notes can represent.
Hence, to convert predictions to notes, one needs to correct for this possible offset.
This is what the following code computes.
End of explanation
"""
def quantize_predictions(group, ideal_offset):
    """Quantize one group of pitch predictions into a single note or rest.

    Args:
        group: pitch values in Hz; 0 marks an unvoiced frame.
        ideal_offset: global tuning offset (in semitones) to subtract.

    Returns:
        (quantization_error, symbol): the error of this quantization and
        either a note name like 'A4' or the string 'Rest'.
    """
    voiced = [freq for freq in group if freq != 0]
    silent_count = len(group) - len(voiced)
    if silent_count > 0.8 * len(group):
        # Mostly silent: interpret as a rest. Each dropped voiced frame is
        # penalized slightly worse (0.51) than a badly sung note (0.5).
        return 0.51 * len(voiced), "Rest"
    # Mean offset-corrected semitone index, rounded to the nearest note.
    h = round(statistics.mean(
        [12 * math.log2(freq / C0) - ideal_offset for freq in voiced]))
    octave, n = divmod(h, 12)
    note = note_names[n] + str(octave)
    # Total absolute deviation of the voiced frames from the chosen note.
    error = sum(
        abs(12 * math.log2(freq / C0) - ideal_offset - h) for freq in voiced)
    return error, note
def get_quantization_and_error(pitch_outputs_and_rests, predictions_per_eighth,
                               prediction_start_offset, ideal_offset):
    """Quantize a full pitch sequence and sum the per-group errors.

    Returns:
        (total_error, notes_and_rests): overall quantization error and the
        list of note names / 'Rest' symbols, one per eighth note.
    """
    # Apply the start offset by prepending that many rest frames.
    padded = [0] * prediction_start_offset + pitch_outputs_and_rests
    # Split into fixed-size groups: one group of predictions per eighth note.
    groups = [padded[i:i + predictions_per_eighth]
              for i in range(0, len(padded), predictions_per_eighth)]
    quantization_error = 0
    notes_and_rests = []
    for group in groups:
        error, symbol = quantize_predictions(group, ideal_offset)
        quantization_error += error
        notes_and_rests.append(symbol)
    return quantization_error, notes_and_rests
best_error = float("inf")
best_notes_and_rests = None
best_predictions_per_note = None
# Exhaustive search over tempo (predictions per note) and start offset,
# keeping the quantization with the lowest total error.
for predictions_per_note in range(20, 65, 1):
    for prediction_start_offset in range(predictions_per_note):
        error, notes_and_rests = get_quantization_and_error(
            pitch_outputs_and_rests, predictions_per_note,
            prediction_start_offset, ideal_offset)
        if error < best_error:
            best_error = error
            best_notes_and_rests = notes_and_rests
            best_predictions_per_note = predictions_per_note
# At this point, best_notes_and_rests contains the best quantization.
# Since we don't need to have rests at the beginning, let's remove these:
while best_notes_and_rests[0] == 'Rest':
    best_notes_and_rests = best_notes_and_rests[1:]
# Also remove silence at the end.
while best_notes_and_rests[-1] == 'Rest':
    best_notes_and_rests = best_notes_and_rests[:-1]
"""
Explanation: We can now use some heuristics to try and estimate the most likely sequence of notes that were sung.
The ideal offset computed above is one ingredient - but we also need to know the speed (how many predictions make up, say, an eighth note?), and the time offset to start quantizing. To keep it simple, we'll just try different speeds and time offsets and measure the quantization error, finally using the values that minimize this error.
End of explanation
"""
# Creating the sheet music score.
sc = music21.stream.Score()
# Adjust the speed to match the actual singing.
bpm = 60 * 60 / best_predictions_per_note
print ('bpm: ', bpm)
a = music21.tempo.MetronomeMark(number=bpm)
sc.insert(0,a)
for snote in best_notes_and_rests:
d = 'half'
if snote == 'Rest':
sc.append(music21.note.Rest(type=d))
else:
sc.append(music21.note.Note(snote, type=d))
#@title [Run this] Helper function to use Open Sheet Music Display (JS code) to show a music score
from IPython.core.display import display, HTML, Javascript
import json, random
def showScore(score):
    """Render a music21 score as sheet music via OpenSheetMusicDisplay.

    Writes the score to a temporary MusicXML file and passes its contents
    to showMusicXML for display.
    """
    # Use a context manager so the MusicXML file handle is closed
    # (the original left the handle returned by open() dangling).
    with open(score.write('musicxml')) as xml_file:
        showMusicXML(xml_file.read())
def showMusicXML(xml):
    """Display a MusicXML string as rendered sheet music using OSMD."""
    # Id of the placeholder <div> that OpenSheetMusicDisplay renders into.
    DIV_ID = "OSMD_div"
    display(HTML('<div id="'+DIV_ID+'">loading OpenSheetMusicDisplay</div>'))
    # JS template: load the OSMD library (working around a requirejs/define
    # clash), then render the MusicXML payload into the placeholder div.
    script = """
    var div_id = {{DIV_ID}};
    function loadOSMD() {
        return new Promise(function(resolve, reject){
            if (window.opensheetmusicdisplay) {
                return resolve(window.opensheetmusicdisplay)
            }
            // OSMD script has a 'define' call which conflicts with requirejs
            var _define = window.define // save the define object
            window.define = undefined // now the loaded script will ignore requirejs
            var s = document.createElement( 'script' );
            s.setAttribute( 'src', "https://cdn.jsdelivr.net/npm/opensheetmusicdisplay@0.7.6/build/opensheetmusicdisplay.min.js" );
            //s.setAttribute( 'src', "/custom/opensheetmusicdisplay.js" );
            s.onload=function(){
                window.define = _define
                resolve(opensheetmusicdisplay);
            };
            document.body.appendChild( s ); // browser will try to load the new script tag
        })
    }
    loadOSMD().then((OSMD)=>{
        window.openSheetMusicDisplay = new OSMD.OpenSheetMusicDisplay(div_id, {
            drawingParameters: "compacttight"
        });
        openSheetMusicDisplay
            .load({{data}})
            .then(
                function() {
                    openSheetMusicDisplay.render();
                }
            );
    })
    """.replace('{{DIV_ID}}',DIV_ID).replace('{{data}}',json.dumps(xml))
    display(Javascript(script))
    return
# rendering the music score
showScore(sc)
print(best_notes_and_rests)
"""
Explanation: Now let's write the quantized notes as sheet music score!
To do it we will use two libraries: music21 and Open Sheet Music Display
Note: for simplicity, we assume here that all notes have the same duration (a half note).
End of explanation
"""
# Saving the recognized musical notes as a MIDI file
converted_audio_file_as_midi = converted_audio_file[:-4] + '.mid'
fp = sc.write('midi', fp=converted_audio_file_as_midi)
wav_from_created_midi = converted_audio_file_as_midi.replace(' ', '_') + "_midioutput.wav"
print(wav_from_created_midi)
"""
Explanation: Let's convert the music notes to a MIDI file and listen to it.
To create this file, we can use the stream we created before.
End of explanation
"""
!timidity $converted_audio_file_as_midi -Ow -o $wav_from_created_midi
"""
Explanation: To listen to it on colab, we need to convert it back to wav. An easy way of doing that is using Timidity.
End of explanation
"""
Audio(wav_from_created_midi)
"""
Explanation: And finally, listen the audio, created from notes, created via MIDI from the predicted pitches, inferred by the model!
End of explanation
"""
|
sdpython/pyquickhelper | _unittests/ut_ipythonhelper/data/having_a_form_in_a_notebook.ipynb | mit | from IPython.display import HTML, Javascript, display_html, display_javascript
input_form = """
<div style="background-color:gainsboro; width:500px; padding:10px;">
<label>Variable Name: </label>
<input type="text" id="var_name" value="myvar" size="170" />
<label>Variable Value: </label>
<input type="text" id="var_value" value="myvalue" />
<br />
<button onclick="set_value()">Set Value</button>
</div>
"""
javascript = """
function set_value(){
var var_name = document.getElementById('var_name').value;
var var_value = document.getElementById('var_value').value;
var command = var_name + " = '" + var_value + "'";
console.log("Executing Command: " + command);
var kernel = IPython.notebook.kernel;
kernel.execute(command);
}
"""
display_javascript(Javascript(javascript))
HTML(input_form)
myvar
"""
Explanation: Having a form in a notebook
Form
Animated output
A form with IPython 3+
Automated menu (this one is fix)
Form
This following trick is inspired from IPython Notebook: Javascript/Python Bi-directional Communication. The code is copy pasted below with some modifications.
End of explanation
"""
from pyquickhelper import open_html_form
params= {"module":"", "version":"v..."}
open_html_form(params, "fill the fields", "form1")
form1
"""
Explanation: Now we try to get something like this:
End of explanation
"""
from pyquickhelper import open_html_form
params= {"login":"", "password":""}
open_html_form(params, "credential", "credential")
credential
"""
Explanation: With a password:
End of explanation
"""
my_address = None
def custom_action(x):
    """Add a 'combined' full-name field to *x* and return its repr.

    Called as the form's Ok-button hook; *x* is the dict filled in by
    the HTML form (expects 'first_name' and 'last_name' keys).
    """
    x["combined"] = " ".join([x["first_name"], x["last_name"]])
    return str(x)
from pyquickhelper import open_html_form
params = { "first_name":"", "last_name":"" }
open_html_form (params, title="enter your name", key_save="my_address", hook="custom_action(my_address)")
my_address
"""
Explanation: To excecute an instruction when the button Ok is clicked:
End of explanation
"""
from pyquickhelper.ipythonhelper import StaticInteract, RangeWidget, RadioWidget
def show_fib(N):
    """Return the first *N* Fibonacci numbers as a space-terminated string.

    Each term is followed by a single space (so the result ends with a
    trailing space for N > 0, and is empty for N == 0).
    """
    def fib_terms():
        # Lazily yield the first N Fibonacci numbers starting from 0.
        a, b = 0, 1
        for _ in range(N):
            yield a
            a, b = b, a + b
    return "".join("{0} ".format(term) for term in fib_terms())
StaticInteract(show_fib,
N=RangeWidget(1, 100, default=10))
"""
Explanation: Animated output
End of explanation
"""
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def plot(amplitude, color):
    """Plot ``amplitude * sin(x)`` over [0, 10] in the given *color*.

    Returns the matplotlib Figure so StaticInteract can pre-render one
    frame per (amplitude, color) combination.
    """
    # Bug fix: the 'axisbg' subplot keyword was deprecated in matplotlib 2.0
    # and removed in 2.2+; 'facecolor' is the supported spelling of the same
    # axes-background option.
    fig, ax = plt.subplots(figsize=(4, 3),
                           subplot_kw={'facecolor': '#EEEEEE',
                                       'axisbelow': True})
    ax.grid(color='w', linewidth=2, linestyle='solid')
    x = np.linspace(0, 10, 1000)
    ax.plot(x, amplitude * np.sin(x), color=color,
            lw=5, alpha=0.4)
    ax.set_xlim(0, 10)
    ax.set_ylim(-1.1, 1.1)
    return fig
StaticInteract(plot,
amplitude=RangeWidget(0.1, 0.5, 0.1, default=0.4),
color=RadioWidget(['blue', 'green', 'red'], default='red'))
"""
Explanation: In order to have a fast display, the function show_lib is called for each possible version. If it is a graph, all possible graphs will be generated.
End of explanation
"""
from IPython.display import display
from IPython.html.widgets import Text
last_name = Text(description="Last Name")
first_name = Text(description="First Name")
display(last_name)
display(first_name)
first_name.value, last_name.value
"""
Explanation: A form with IPython 3+
Not yet ready and the form does not show up in the converted notebook. You need to execute the notebook.
End of explanation
"""
from jyquickhelper import add_notebook_menu
add_notebook_menu()
"""
Explanation: Automated menu
End of explanation
"""
|
KrusecN13/Knjige | Projekt_knjige.ipynb | mit | import pandas as pd
pd.options.display.max_rows = 12
pd.options.display.max_columns = 12
nagrade = pd.read_csv('csv-datoteke/knjige-nagrade.csv',index_col='Naslov')
stoletja = pd.read_csv('csv-datoteke/knjige.csv',index_col='Naslov')
"""
Explanation: # Knjige
Projekt z naslovom Knjige za predmet programiranje 1 z namenom analize ocen knjig in pisateljev.
## Priprava podatkov:
S spletne strani www.goodreads.com poberemo podatke za najboljše knjige po stoletjih. Ti podatki zajemajo:
* naslov knjige,
* leto prve izdaje,
* pisatelja,
* povprečno oceno uporabnikov spletne strani,
* vrstni red, ki so dosegle na strani.
Poleg teh podatkov poberemo tudi naslednje:
* zvrst knjige,
* naslov,
* leto nagrade,
ki pa zajemajo knjige, ki so prejele nagrado za najboljšo knjigo svoje zvrsti v posameznem letu 2010 - 2014.
Podatke shranimo v dve CSV datoteki, ki sta v mapi csv_datoteke.
Zatem naložimo paket in 2 tabeli.
End of explanation
"""
nagrade[:40]
stoletja
"""
Explanation: Podatke smo že prej prečistili s pomočjo modula html in funkcije unescape, zato da se bodo naslovi knjig res ujemali, saj je to edini podatek, ki povezuje obe tabeli.
Spodaj je del obeh tebel:
End of explanation
"""
stoletja['Stoletje'] = (stoletja.Leto_izdaje // 100) + 1
stoletja
povprecje = stoletja.groupby('Stoletje').mean()
povprecje['Ocena']
%matplotlib inline
povprecje['Ocena'].plot(title = 'Povprečna ocena knjig posameznega stoletja:').set_ylabel('Ocena')
"""
Explanation: Analiza podatkov:
Ocene knjig po stoletjih:
Najprej bomo analizirali podatke o knjigah po stoletjih. Ogledali si bomo povprečne ocene glasovanja za najboljšo knjigo stoletja. Najprej dodamo v našo tabelo še en stolpec, v katerem dodamo stoletje v katerem je bila knjiga izdana.
Spodaj vidimo prikaz te tabele.
Poleg tega izračunamo še povprečne ocene knjig po stoletjih. Najprej jih prikažemo, potem pa s pomočjo njih narišemo še graf.
End of explanation
"""
stoletja[stoletja.Stoletje == 14]
"""
Explanation: Kot vidimo na grafu in na zgornjih podatkih o povprečni oceni, so bile knjige med 13. in 14. stoletjem v rasti. Iz tega obdobja prihajano različna dela, ki so prikazani na spodnji tabeli.
End of explanation
"""
a = stoletja.groupby('Pisatelj').mean()['Ocena'].sort_values(ascending = False)
a
"""
Explanation: Kasneje so ocene padale, najslabše ocenjene knjige so bile v 17. stoletju. V tistem obdobju so pisali Shakespeare, Molière, Descartes, ki so sicer priznani pisatelji.
Po 18. stoletju, pa so se ocene knjig le dvigovale, z izjemo 21. stoletja, v katerem so ocene ponovno začele padati.
Ocene knjig posameznega pisatelja:
Najprej ustvarimo tabelo pisateljev, ter njihovih ocen, ki so povprečna ocena vseh njegovih del.
End of explanation
"""
prvi_4 = a[:15].plot(kind='bar',title = 'Povprečna ocena knjig posameznega pisatelja:', y=(3,5.5)).set_ylabel('Ocena')
drugi_4 = a[15:30].plot(kind='bar',title = 'Povprečna ocena knjig posameznega pisatelja:').set_ylabel('Ocena')
"""
Explanation: Na spodnjih dveh grafih vidimo prvih 30 najvišje ocenjenih pisateljev, ter njihove ocene.
End of explanation
"""
# Outer-join the award table with the per-century table on their shared
# 'Naslov' (title) index.  Bug fix: pandas documents left_index/right_index
# as booleans; the original passed the string 'Naslov', which only worked
# because a non-empty string is truthy.
vsi = pd.merge(nagrade, stoletja, left_index=True, right_index=True, how='outer')
vsi[10:40]
"""
Explanation: Korelacije med ocenami:
Sedaj analiziramo še podatke iz druge tabele in ju združimo med seboj. S tem imamo še podatke o zvrsti ter prejeti nagradi.
Spodaj imamo to tabelo prikazano.
End of explanation
"""
# Re-join the two tables on their shared 'Naslov' index and keep only rows
# present in both (drop any row with a missing value).  Bug fix: pandas
# documents left_index/right_index as booleans; the original passed the
# string 'Naslov', which only worked via truthiness.
vsi = pd.merge(nagrade, stoletja, left_index=True, right_index=True, how='outer')
skupni = vsi.dropna(axis=0, how='any')
skupni
"""
Explanation: Večina knjig, ki je dosegla prvo mesto na podelitvi nagrad, ni prišla v izbor prvih 100 knjig stoletja. Tu se vidi različna merila med sodniki na podelitvi nagrad in med vsakdanjimi bralci, ki glasujejo na spletni strani.
V naslednji tabeli vidimo, da je le 8 knjig, ki imajo visoko uvrstitev na glasovanju in so dosegle prvo mesto na podelitvi nagrad Choice award.
End of explanation
"""
skupni['Ocena'].plot(kind='bar',title = 'Ocene zmagovalnih knjig:').set_ylabel('Ocena')
"""
Explanation: Sedaj si bomo ogledali še ocene knjig, ki so prejele nagrado in se uvrstile na top lestvico stoletja.
End of explanation
"""
|
tensorflow/workshops | kdd2019/colab/BERT fine-tuning and inferences with Cloud TPU.ipynb | apache-2.0 | # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Explanation: <a href="https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright 2018 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
import datetime
import json
import os
import pprint
import random
import string
import sys
import tensorflow as tf
assert 'COLAB_TPU_ADDR' in os.environ, 'ERROR: Not connected to a TPU runtime; please see the first cell in this notebook for instructions!'
TPU_ADDRESS = 'grpc://' + os.environ['COLAB_TPU_ADDR']
print('TPU address is', TPU_ADDRESS)
from google.colab import auth
auth.authenticate_user()
with tf.Session(TPU_ADDRESS) as session:
print('TPU devices:')
pprint.pprint(session.list_devices())
# Upload credentials to TPU.
with open('/content/adc.json', 'r') as f:
auth_info = json.load(f)
tf.contrib.cloud.configure_gcs(session, credentials=auth_info)
# Now credentials are set for all future sessions on this TPU.
"""
Explanation: BERT End to End (Fine-tuning + Predicting) in 5 minutes with Cloud TPU
Overview
BERT, or Bidirectional Embedding Representations from Transformers, is a new method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. The academic paper can be found here: https://arxiv.org/abs/1810.04805.
This Colab demonstrates using a free Colab Cloud TPU to fine-tune sentence and sentence-pair classification tasks built on top of pretrained BERT models and
run predictions on the tuned model. The colab demonstrates loading pretrained BERT models from both TF Hub and checkpoints.
Note: You will need a GCP (Google Compute Engine) account and a GCS (Google Cloud
Storage) bucket for this Colab to run.
Please follow the Google Cloud TPU quickstart for how to create GCP account and GCS bucket. You have $300 free credit to get started with any GCP product. You can learn more about Cloud TPU at https://cloud.google.com/tpu/docs.
This notebook is hosted on GitHub. To view it in its original repository, after opening the notebook, select File > View on GitHub.
Learning objectives
In this notebook, you will learn how to train and evaluate a BERT model using TPU.
Instructions
<h3><a href="https://cloud.google.com/tpu/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png" width="50"></a> Train on TPU</h3>
Create a Cloud Storage bucket for your TensorBoard logs at http://console.cloud.google.com/storage and fill in the BUCKET parameter in the "Parameters" section below.
On the main menu, click Runtime and select Change runtime type. Set "TPU" as the hardware accelerator.
Click Runtime again and select Runtime > Run All (Watch out: the "Colab-only auth for this notebook and the TPU" cell requires user input). You can also run the cells manually with Shift-ENTER.
Set up your TPU environment
In this section, you perform the following tasks:
Set up a Colab TPU running environment
Verify that you are connected to a TPU device
Upload your credentials to TPU to access your GCS bucket.
End of explanation
"""
import sys
!test -d bert_repo || git clone https://github.com/google-research/bert bert_repo
if not 'bert_repo' in sys.path:
sys.path += ['bert_repo']
# import python modules defined by BERT
import modeling
import optimization
import run_classifier
import run_classifier_with_tfhub
import tokenization
# import tfhub
import tensorflow_hub as hub
"""
Explanation: Prepare and import BERT modules
With your environment configured, you can now prepare and import the BERT modules. The following step clones the source code from GitHub and import the modules from the source. Alternatively, you can install BERT using pip (!pip install bert-tensorflow).
End of explanation
"""
TASK = 'MRPC' #@param {type:"string"}
assert TASK in ('MRPC', 'CoLA'), 'Only (MRPC, CoLA) are demonstrated here.'
# Download glue data.
! test -d download_glue_repo || git clone https://gist.github.com/60c2bdb54d156a41194446737ce03e2e.git download_glue_repo
!python download_glue_repo/download_glue_data.py --data_dir='glue_data' --tasks=$TASK
TASK_DATA_DIR = 'glue_data/' + TASK
print('***** Task data directory: {} *****'.format(TASK_DATA_DIR))
!ls $TASK_DATA_DIR
BUCKET = 'YOUR_BUCKET' #@param {type:"string"}
assert BUCKET, 'Must specify an existing GCS bucket name'
OUTPUT_DIR = 'gs://{}/bert-tfhub/models/{}'.format(BUCKET, TASK)
tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
# Available pretrained model checkpoints:
# uncased_L-12_H-768_A-12: uncased BERT base model
# uncased_L-24_H-1024_A-16: uncased BERT large model
# cased_L-12_H-768_A-12: cased BERT large model
BERT_MODEL = 'uncased_L-12_H-768_A-12' #@param {type:"string"}
BERT_MODEL_HUB = 'https://tfhub.dev/google/bert_' + BERT_MODEL + '/1'
"""
Explanation: Prepare for training
This next section of code performs the following tasks:
Specify task and download training data.
Specify BERT pretrained model
Specify GS bucket, create output directory for model checkpoints and eval results.
End of explanation
"""
tokenizer = run_classifier_with_tfhub.create_tokenizer_from_hub_module(BERT_MODEL_HUB)
tokenizer.tokenize("This here's an example of using the BERT tokenizer")
"""
Explanation: Now let's load tokenizer module from TF Hub and play with it.
End of explanation
"""
TRAIN_BATCH_SIZE = 32
EVAL_BATCH_SIZE = 8
PREDICT_BATCH_SIZE = 8
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 3.0
MAX_SEQ_LENGTH = 128
# Warmup is a period of time where the learning rate
# is small and gradually increases--usually helps training.
WARMUP_PROPORTION = 0.1
# Model configs
SAVE_CHECKPOINTS_STEPS = 1000
SAVE_SUMMARY_STEPS = 500
processors = {
"cola": run_classifier.ColaProcessor,
"mnli": run_classifier.MnliProcessor,
"mrpc": run_classifier.MrpcProcessor,
}
processor = processors[TASK.lower()]()
label_list = processor.get_labels()
# Compute number of train and warmup steps from batch size
train_examples = processor.get_train_examples(TASK_DATA_DIR)
num_train_steps = int(len(train_examples) / TRAIN_BATCH_SIZE * NUM_TRAIN_EPOCHS)
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
# Setup TPU related config
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(TPU_ADDRESS)
NUM_TPU_CORES = 8
ITERATIONS_PER_LOOP = 1000
def get_run_config(output_dir):
    """Build a TPUEstimator RunConfig that writes checkpoints to *output_dir*.

    Uses the module-level TPU settings (tpu_cluster_resolver,
    SAVE_CHECKPOINTS_STEPS, ITERATIONS_PER_LOOP, NUM_TPU_CORES).
    """
    tpu_settings = tf.contrib.tpu.TPUConfig(
        iterations_per_loop=ITERATIONS_PER_LOOP,
        num_shards=NUM_TPU_CORES,
        per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2)
    return tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=output_dir,
        save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,
        tpu_config=tpu_settings)
"""
Explanation: Also we initialize our hyperparameters, prepare the training data and initialize the TPU config.
End of explanation
"""
# Force TF Hub writes to the GS bucket we provide.
os.environ['TFHUB_CACHE_DIR'] = OUTPUT_DIR
model_fn = run_classifier_with_tfhub.model_fn_builder(
num_labels=len(label_list),
learning_rate=LEARNING_RATE,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=True,
bert_hub_module_handle=BERT_MODEL_HUB
)
estimator_from_tfhub = tf.contrib.tpu.TPUEstimator(
use_tpu=True,
model_fn=model_fn,
config=get_run_config(OUTPUT_DIR),
train_batch_size=TRAIN_BATCH_SIZE,
eval_batch_size=EVAL_BATCH_SIZE,
predict_batch_size=PREDICT_BATCH_SIZE,
)
"""
Explanation: Fine-tune and Run Predictions on a pretrained BERT Model from TF Hub
This section demonstrates fine-tuning from a pre-trained BERT TF Hub module and running predictions.
End of explanation
"""
# Train the model
def model_train(estimator):
    """Fine-tune the BERT classifier on the task training examples.

    Relies on module-level globals: train_examples, label_list,
    MAX_SEQ_LENGTH, tokenizer, TRAIN_BATCH_SIZE, num_train_steps.
    """
    print('MRPC/CoLA on BERT base model normally takes about 2-3 minutes. Please wait...')
    # We'll set sequences to be at most 128 tokens long.
    features = run_classifier.convert_examples_to_features(
        train_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    print('***** Started training at {} *****'.format(datetime.datetime.now()))
    print(' Num examples = {}'.format(len(train_examples)))
    print(' Batch size = {}'.format(TRAIN_BATCH_SIZE))
    tf.logging.info(" Num steps = %d", num_train_steps)
    input_fn = run_classifier.input_fn_builder(
        features=features,
        seq_length=MAX_SEQ_LENGTH,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=input_fn, max_steps=num_train_steps)
    print('***** Finished training at {} *****'.format(datetime.datetime.now()))
print('***** Finished training at {} *****'.format(datetime.datetime.now()))
model_train(estimator_from_tfhub)
def model_eval(estimator):
    """Evaluate the fine-tuned model on the task dev set.

    Prints each metric and also persists the results to
    OUTPUT_DIR/eval_results.txt.  Relies on module-level globals:
    processor, TASK_DATA_DIR, label_list, MAX_SEQ_LENGTH, tokenizer,
    EVAL_BATCH_SIZE, OUTPUT_DIR.
    """
    # Eval the model.
    eval_examples = processor.get_dev_examples(TASK_DATA_DIR)
    eval_features = run_classifier.convert_examples_to_features(
        eval_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    print('***** Started evaluation at {} *****'.format(datetime.datetime.now()))
    print(' Num examples = {}'.format(len(eval_examples)))
    print(' Batch size = {}'.format(EVAL_BATCH_SIZE))
    # Eval will be slightly WRONG on the TPU because it will truncate
    # the last batch.
    eval_steps = int(len(eval_examples) / EVAL_BATCH_SIZE)
    eval_input_fn = run_classifier.input_fn_builder(
        features=eval_features,
        seq_length=MAX_SEQ_LENGTH,
        is_training=False,
        drop_remainder=True)
    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
    print('***** Finished evaluation at {} *****'.format(datetime.datetime.now()))
    output_eval_file = os.path.join(OUTPUT_DIR, "eval_results.txt")
    # tf.gfile supports gs:// paths, so results land in the GCS bucket.
    with tf.gfile.GFile(output_eval_file, "w") as writer:
        print("***** Eval results *****")
        for key in sorted(result.keys()):
            # Echo each metric to stdout and persist it to the results file.
            print(' {} = {}'.format(key, str(result[key])))
            writer.write("%s = %s\n" % (key, str(result[key])))
model_eval(estimator_from_tfhub)
def model_predict(estimator):
    """Run predictions on a small slice of the dev set and print each one.

    Relies on module-level globals: processor, TASK_DATA_DIR,
    PREDICT_BATCH_SIZE, label_list, MAX_SEQ_LENGTH, tokenizer.
    """
    # Make predictions on a subset of eval examples
    examples = processor.get_dev_examples(TASK_DATA_DIR)[:PREDICT_BATCH_SIZE]
    features = run_classifier.convert_examples_to_features(
        examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    input_fn = run_classifier.input_fn_builder(
        features=features, seq_length=MAX_SEQ_LENGTH,
        is_training=False, drop_remainder=True)
    for example, prediction in zip(examples, estimator.predict(input_fn)):
        print('text_a: %s\ntext_b: %s\nlabel:%s\nprediction:%s\n'
              % (example.text_a, example.text_b, str(example.label),
                 prediction['probabilities']))
model_predict(estimator_from_tfhub)
"""
Explanation: At this point, you can now fine-tune the model, evaluate it, and run predictions on it.
End of explanation
"""
# Setup task specific model and TPU running config.
BERT_PRETRAINED_DIR = 'gs://cloud-tpu-checkpoints/bert/' + BERT_MODEL
print('***** BERT pretrained directory: {} *****'.format(BERT_PRETRAINED_DIR))
!gsutil ls $BERT_PRETRAINED_DIR
CONFIG_FILE = os.path.join(BERT_PRETRAINED_DIR, 'bert_config.json')
INIT_CHECKPOINT = os.path.join(BERT_PRETRAINED_DIR, 'bert_model.ckpt')
model_fn = run_classifier.model_fn_builder(
bert_config=modeling.BertConfig.from_json_file(CONFIG_FILE),
num_labels=len(label_list),
init_checkpoint=INIT_CHECKPOINT,
learning_rate=LEARNING_RATE,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=True,
use_one_hot_embeddings=True
)
OUTPUT_DIR = OUTPUT_DIR.replace('bert-tfhub', 'bert-checkpoints')
tf.gfile.MakeDirs(OUTPUT_DIR)
estimator_from_checkpoints = tf.contrib.tpu.TPUEstimator(
use_tpu=True,
model_fn=model_fn,
config=get_run_config(OUTPUT_DIR),
train_batch_size=TRAIN_BATCH_SIZE,
eval_batch_size=EVAL_BATCH_SIZE,
predict_batch_size=PREDICT_BATCH_SIZE,
)
"""
Explanation: Fine-tune and run predictions on a pre-trained BERT model from checkpoints
Alternatively, you can also load pre-trained BERT models from saved checkpoints.
End of explanation
"""
model_train(estimator_from_checkpoints)
model_eval(estimator_from_checkpoints)
model_predict(estimator_from_checkpoints)
"""
Explanation: Now, you can repeat the training, evaluation, and prediction steps.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.20/_downloads/306dcf0b43a155a02804528d597e4e81/plot_roi_erpimage_by_rt.ipynb | bsd-3-clause | # Authors: Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.event import define_target_events
from mne.channels import make_1020_channel_selections
print(__doc__)
"""
Explanation: Plot single trial activity, grouped by ROI and sorted by RT
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
The EEGLAB example file, which contains an experiment with button press
responses to simple visual stimuli, is read in and response times are
calculated.
Regions of Interest are determined by the channel types (in 10/20 channel
notation, even channels are right, odd are left, and 'z' are central). The
median and the Global Field Power within each channel group is calculated,
and the trials are plotted, sorting by response time.
End of explanation
"""
data_path = mne.datasets.testing.data_path()
fname = data_path + "/EEGLAB/test_raw.set"
event_id = {"rt": 1, "square": 2} # must be specified for str events
raw = mne.io.read_raw_eeglab(fname)
mapping = {
'EEG 000': 'Fpz', 'EEG 001': 'EOG1', 'EEG 002': 'F3', 'EEG 003': 'Fz',
'EEG 004': 'F4', 'EEG 005': 'EOG2', 'EEG 006': 'FC5', 'EEG 007': 'FC1',
'EEG 008': 'FC2', 'EEG 009': 'FC6', 'EEG 010': 'T7', 'EEG 011': 'C3',
'EEG 012': 'C4', 'EEG 013': 'Cz', 'EEG 014': 'T8', 'EEG 015': 'CP5',
'EEG 016': 'CP1', 'EEG 017': 'CP2', 'EEG 018': 'CP6', 'EEG 019': 'P7',
'EEG 020': 'P3', 'EEG 021': 'Pz', 'EEG 022': 'P4', 'EEG 023': 'P8',
'EEG 024': 'PO7', 'EEG 025': 'PO3', 'EEG 026': 'POz', 'EEG 027': 'PO4',
'EEG 028': 'PO8', 'EEG 029': 'O1', 'EEG 030': 'Oz', 'EEG 031': 'O2'
}
raw.rename_channels(mapping)
raw.set_channel_types({"EOG1": 'eog', "EOG2": 'eog'})
raw.set_montage('standard_1020')
events = mne.events_from_annotations(raw, event_id)[0]
"""
Explanation: Load EEGLAB example data (a small EEG dataset)
End of explanation
"""
# define target events:
# 1. find response times: distance between "square" and "rt" events
# 2. extract A. "square" events B. followed by a button press within 700 msec
tmax = .7
sfreq = raw.info["sfreq"]
reference_id, target_id = 2, 1
new_events, rts = define_target_events(events, reference_id, target_id, sfreq,
tmin=0., tmax=tmax, new_id=2)
epochs = mne.Epochs(raw, events=new_events, tmax=tmax + .1,
event_id={"square": 2})
"""
Explanation: Create Epochs
End of explanation
"""
# Parameters for plotting
order = rts.argsort() # sorting from fast to slow trials
selections = make_1020_channel_selections(epochs.info, midline="12z")
# The actual plots (GFP)
epochs.plot_image(group_by=selections, order=order, sigma=1.5,
overlay_times=rts / 1000., combine='gfp',
ts_args=dict(vlines=[0, rts.mean() / 1000.]))
"""
Explanation: Plot using :term:Global Field Power <GFP>
End of explanation
"""
epochs.plot_image(group_by=selections, order=order, sigma=1.5,
overlay_times=rts / 1000., combine='median',
ts_args=dict(vlines=[0, rts.mean() / 1000.]))
"""
Explanation: Plot using median
End of explanation
"""
|
simpleoier/2016FallSpeechProj | 1. Analysis.ipynb | apache-2.0 | import numpy as np
import os
from sklearn.manifold import TSNE
from common import Data
lld=Data('lld')
lld.load_training_data()
print 'training feature shape: ', lld.feature.shape
print 'training label shape: ', lld.label.shape
#lld.load_test_data()
#print 'test feature shape: ',lld.feature_test.shape
#print 'test label shape: ',lld.label_test.shape
"""
Explanation: Feature Analysis
The standard features: LLR (low level discriptors)
File path: \emotiondetection\features_labels_lld
load data
class Data. please see common.py
get the training data
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
feature_table=[1,10,100,300]
for ind,fea in enumerate(feature_table):
f= lld.feature[:,fea]
plt.subplot(2,2,ind+1)
plt.hist(f)
#plt.title("Histogram of feature "+str(ind))
plt.axis('tight')
"""
Explanation: a. histogram
plot the histgram of one feature, to see what distribution the feature is.
End of explanation
"""
model=TSNE(n_components=2,random_state=0) # reduct the dimention to 2 for visualization
np.set_printoptions(suppress=True)
Y=model.fit_transform(lld.feature,lld.label) # the reducted data
plt.scatter(Y[:, 0], Y[:, 1],c=lld.label[:,0],cmap=plt.cm.Spectral)
plt.title('training data')
plt.axis('tight')
print Y.shape
"""
Explanation: Different features have different ditributions.
Some are subject to Gussain distribution.
b. t-SNE
use TSNE to see the linear separability of the data.
End of explanation
"""
|
Mynti207/cs207project | docs/stock_example_prices.ipynb | mit | # load data
with open('data/prices_include.json') as f:
stock_data_include = json.load(f)
with open('data/prices_exclude.json') as f:
stock_data_exclude = json.load(f)
# keep track of which stocks are included/excluded from the database
stocks_include = list(stock_data_include.keys())
stocks_exclude = list(stock_data_exclude.keys())
# check the number of market days in the year
num_days = len(stock_data_include[stocks_include[0]])
num_days
"""
Explanation: Stock Market Similarity Searches: Daily Prices
We have provided a year of daily closing prices for 379 S&P 500 stocks. We have explicitly excluded stocks with incomplete or missing data. We have pre-loaded 350 stocks in the database, and have excluded 29 stocks for later use in similarity searches.
Data source: <a href='www.stockwiz.com'>www.stockwiz.com</a>
End of explanation
"""
# 1. load the database server
# when running from the terminal
# python go_server_persistent.py --ts_length 245 --db_name 'stock_prices'
# here we load the server as a subprocess for demonstration purposes
server = subprocess.Popen(['python', '../go_server_persistent.py',
'--ts_length', str(num_days), '--data_dir', '../db_files', '--db_name', 'stock_prices'])
time.sleep(5) # make sure it loads completely
# 2. load the database webserver
# when running from the terminal
# python go_webserver.py
# here we load the server as a subprocess for demonstration purposes
webserver = subprocess.Popen(['python', '../go_webserver.py'])
time.sleep(5) # make sure it loads completely
# 3. import the web interface and initialize it
from webserver import *
web_interface = WebInterface()
"""
Explanation: Database Initialization
Let's start by initializing all the database components.
End of explanation
"""
# # insert into database
# for stock in stocks_include:
# web_interface.insert_ts(pk=stock, ts=TimeSeries(range(num_days), stock_data_include[stock]))
"""
Explanation: Stock Data Initialization
The database is now up and running. We have pre-loaded the data for you, but you can always unquote the code below to re-load the data if you accidentally delete it.
End of explanation
"""
len(web_interface.select())
"""
Explanation: Let's check how many stocks are currently in the database (should be 350).
End of explanation
"""
# let's look at the first 10 stocks
web_interface.select(fields=['ts'], additional={'sort_by': '+pk', 'limit': 10})
"""
Explanation: Let's look at the first 10 stocks, to check that the data has been loaded correctly.
End of explanation
"""
# # randomly pick vantage points
# # note: this can be time-intensive for a large number of vantage points
# num_vps = 10
# random_vps = np.random.choice(len(stocks_include), size=num_vps, replace=False)
# vpkeys = [stocks_include[s] for s in random_vps]
# # mark in database
# for vp in vpkeys:
# web_interface.insert_vp(vp)
"""
Explanation: Vantage Point Search
We need to initialize vantage points in order to carry out a vantage point search. Again, this has already been done for you, but you can re-create the results by running the following code.
End of explanation
"""
# pick the stock
stock = np.random.choice(stocks_exclude)
print('Stock:', stock)
# run the vantage point similarity search
result = web_interface.vp_similarity_search(TimeSeries(range(num_days), stock_data_exclude[stock]), 1)
stock_match = list(result)[0]
stock_ts = web_interface.select(fields=['ts'], md={'pk': stock_match})[stock_match]['ts']
print('Most similar stock:', stock_match)
# visualize similarity
plt.plot(stock_data_exclude[stock], label='Query:' + stock)
plt.plot(stock_ts.values(), label='Result:' + stock_match)
plt.xticks([])
plt.legend(loc='best')
plt.title('Daily Stock Price Similarity')
plt.show()
"""
Explanation: Let's pick one of our excluded stocks and carry out a vantage point similarity search.
End of explanation
"""
# pick the stock
stock = np.random.choice(stocks_exclude)
print('Stock:', stock)
# run the isax tree similarity search
result = web_interface.isax_similarity_search(TimeSeries(range(num_days), stock_data_exclude[stock]))
# could not find a match
if result == 'ERROR: NO_MATCH':
print('Could not find a similar stock.')
# found a match
else:
# closest time series
stock_match = list(result)[0]
stock_ts = web_interface.select(fields=['ts'], md={'pk': stock_match})[stock_match]['ts']
print('Most similar stock:', stock_match)
# visualize similarity
plt.plot(stock_data_exclude[stock], label='Query:' + stock)
plt.plot(stock_ts.values(), label='Result:' + stock_match)
plt.xticks([])
plt.legend(loc='best')
plt.title('Daily Stock Price Similarity')
plt.show()
"""
Explanation: iSAX Tree Search
Let's pick another one of our excluded stocks and carry out an iSAX tree similarity search. Note that this is an approximate search technique, so it will not always be able to find a similar stock.
End of explanation
"""
# pick the stock
stock = np.random.choice(stocks_exclude)
print('Stock:', stock)
# run the vantage point similarity search
result = web_interface.vp_similarity_search(TimeSeries(range(num_days), stock_data_exclude[stock]), 1)
match_vp = list(result)[0]
ts_vp = web_interface.select(fields=['ts'], md={'pk': match_vp})[match_vp]['ts']
print('VP search result:', match_vp)
# run the isax similarity search
result = web_interface.isax_similarity_search(TimeSeries(range(num_days), stock_data_exclude[stock]))
# could not find an isax match
if result == 'ERROR: NO_MATCH':
print('iSAX search result: Could not find a similar stock.')
# found a match
else:
# closest time series
match_isax = list(result)[0]
ts_isax = web_interface.select(fields=['ts'], md={'pk': match_isax})[match_isax]['ts']
print('iSAX search result:', match_isax)
# visualize similarity
plt.plot(stock_data_exclude[stock], label='Query:' + stock)
plt.plot(ts_vp.values(), label='Result:' + match_vp)
plt.plot(ts_isax.values(), label='Result:' + match_isax)
plt.xticks([])
plt.legend(loc='best')
plt.title('Daily Stock Price Similarity')
plt.show()
"""
Explanation: Comparing Similarity Searches
Now, let's pick one more random stock, carry out both types of similarity searches, and compare the results.
End of explanation
"""
print(web_interface.isax_tree())
"""
Explanation: iSAX Tree Representation
Finally, let's visualize the iSAX tree. The clusters represent groups of "similar" stocks.
End of explanation
"""
# terminate processes before exiting
os.kill(server.pid, signal.SIGINT)
time.sleep(5) # give it time to terminate
web_interface = None
webserver.terminate()
"""
Explanation: Termination
Always remember to terminate any outstanding processes!
End of explanation
"""
|
shaunharker/DSGRN | software/Server/Accounts/Skeleton/notebooks/Tutorials/DSGRN_Python_GettingStarted.ipynb | mit | import DSGRN
"""
Explanation: DSGRN Python Interface Tutorial
This notebook shows the basics of manipulating DSGRN with the python interface.
End of explanation
"""
network = DSGRN.Network("network.txt")
print(network)
print(network.graphviz())
"""
Explanation: Network
The starting point of the DSGRN analysis is a network specification file. We have a network specification file "network.txt" we will load.
End of explanation
"""
import graphviz
graph = graphviz.Source(network.graphviz())
graph
"""
Explanation: Graphviz
Many of the objects in DSGRN provide a method "graphviz" which emits a string understood by the graphviz language. In an iPython notebook we can embed these picture easily.
End of explanation
"""
parametergraph = DSGRN.ParameterGraph(network)
print("There are " + str(parametergraph.size()) + " nodes in the parameter graph.")
"""
Explanation: ParameterGraph
Given a network, there is an associated "Parameter Graph", which is a combinatorial representation of parameter space.
End of explanation
"""
parameterindex = 34892 # An integer in [0,32592)
parameter = parametergraph.parameter(parameterindex)
parameter
print(parameter)
"""
Explanation: Parameter
The ParameterGraph class may be regarded as a factory which produces parameter nodes. In the DSGRN code, parameter nodes are referred to simply as "parameters" and are represented as "Parameter" objects.
End of explanation
"""
domaingraph = DSGRN.DomainGraph(parameter)
domaingraph
graphviz.Source(domaingraph.graphviz())
print(domaingraph.coordinates(5)) # ... I wonder what region in phase space domain 5 corresponds to.
"""
Explanation: DomainGraph
Let's compute the dynamics corresponding to this parameter node. In particular, we can instruct DSGRN to create a "domaingraph" object.
End of explanation
"""
morsedecomposition = DSGRN.MorseDecomposition(domaingraph.digraph())
graphviz.Source(morsedecomposition.graphviz())
"""
Explanation: MorseDecomposition
Let's compute the partially ordered set of recurrent components (strongly connected components with an edge) of the domain graph.
End of explanation
"""
morsegraph = DSGRN.MorseGraph()
morsegraph.assign(domaingraph, morsedecomposition)
morsegraph
print(morsegraph)
graphviz.Source(morsegraph.graphviz())
"""
Explanation: MorseGraph
The final step in our analysis is the production of an annotated Morse graph.
End of explanation
"""
|
phungkh/phys202-2015-work | assignments/assignment04/MatplotlibEx01.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
"""
Explanation: Matplotlib Exercise 1
Imports
End of explanation
"""
import os
assert os.path.isfile('yearssn.dat')
"""
Explanation: Line plot of sunspot data
Download the .txt data for the "Yearly mean total sunspot number [1700 - now]" from the SILSO website. Upload the file to the same directory as this notebook.
End of explanation
"""
# Load the SILSO yearly sunspot file; column 0 is the year, column 1 the
# yearly mean sunspot count.
data=np.loadtxt('yearssn.dat')
year=data[:,0]
ssc=data[:,1]

# Sanity checks: 315 yearly records (1700 onward), stored as floats.
assert len(year)==315
assert year.dtype==np.dtype(float)
assert len(ssc)==315
assert ssc.dtype==np.dtype(float)
"""
Explanation: Use np.loadtxt to read the data into a NumPy array called data. Then create two new 1d NumPy arrays named years and ssc that have the sequence of year and sunspot counts.
End of explanation
"""
# Very wide, short figure so the steepest slope in the series is ~1 (banking
# to 45 degrees); spines removed per Tufte's data-ink principle.
plt.figure(figsize=(30,1))
plt.plot(year,ssc)
plt.xlabel('year')
plt.ylabel('ssc')
plt.title('Sunspots vs years')
plt.grid(True)
plt.box(False)
plt.xlim(right=2020)  # small margin past the last data year
assert True # leave for grading
"""
Explanation: Make a line plot showing the sunspot count as a function of year.
Customize your plot to follow Tufte's principles of visualizations.
Adjust the aspect ratio/size so that the steepest slope in your plot is approximately 1.
Customize the box, grid, spines and ticks to match the requirements of this data.
End of explanation
"""
# One panel per century (plus the partial 2000s). The four panels share
# identical styling, so drive them from a list of x ranges instead of
# repeating the same plotting stanza four times.
plt.figure(figsize=(20,5))
century_limits = [(1700, 1800), (1801, 1900), (1901, 2000), (2000, 2015)]
for panel, (start, end) in enumerate(century_limits, start=1):
    plt.subplot(4, 1, panel)
    plt.plot(year, ssc)
    plt.xlim(start, end)
    plt.ylabel('ssc')
    plt.yticks([0, 100, 200], [0, 100, 200])
    plt.box(False)
    if panel == len(century_limits):
        # Only the bottom panel carries the shared x-axis label.
        plt.xlabel('year')
plt.tight_layout()
assert True # leave for grading
"""
Explanation: Describe the choices you have made in building this visualization and how they make it effective.
-Made fig size larger because of how squished the data points looked.
-Added axes titles ( so we know what we are looking at)
-Made gridlines to make it easier to pinpoint where the points are relative to scale
-Deleted the spines of the box, they are not needed
Now make 4 subplots, one for each century in the data set. This approach works well for this dataset as it allows you to maintain mild slopes while limiting the overall width of the visualization. Perform similar customizations as above:
Customize your plot to follow Tufte's principles of visualizations.
Adjust the aspect ratio/size so that the steepest slope in your plot is approximately 1.
Customize the box, grid, spines and ticks to match the requirements of this data.
End of explanation
"""
|
konstantinstadler/pymrio | doc/source/notebooks/working_with_oecd_icio.ipynb | gpl-3.0 | import pymrio
from pathlib import Path
oecd_storage = Path('/tmp/mrios/OECD')
meta_2018_download = pymrio.download_oecd(storage_folder=oecd_storage, years=[2011])
"""
Explanation: Working with the OECD - ICIO database
The OECD Inter-Country Input-Output tables (ICIO) are available on the OECD webpage.
The parsing function >parse_oecd< works for both, the 2016 and 2018 release.
The tables can either be downloaded manually (using the csv format), or the pymrio OECD automatic downloader can be used.
For example, to get the 2011 table of the 2018 release do:
End of explanation
"""
oecd_path_year = pymrio.parse_oecd(path=oecd_storage, year=2011)
"""
Explanation: OECD provides the data compressed in zip files. The pymrio oecd parser works with both, the compressed and unpacked version.
Parsing
To parse a single year of the database, either specify a path and year:
End of explanation
"""
oecd_file = pymrio.parse_oecd(path=oecd_storage / 'ICIO2018_2011.zip')
oecd_path_year == oecd_file
"""
Explanation: Or directly specify a file to parse:
End of explanation
"""
oecd_file.factor_inputs.F.head()
"""
Explanation: Note: The original OECD ICIO tables provide some disaggregation of the Mexican and Chinese tables for the interindustry flows. The pymrio parser automatically aggregates these into Chinese And Mexican totals. Thus, the MX1, MX2, .. and CN1, CN2, ... entries are aggregated into MEX and CHN.
Currently, the parser only includes the value added and taxes data given in the original file as satellite accounts.
These are accessible in the extension "factor_inputs":
End of explanation
"""
|
tensorflow/docs-l10n | site/zh-cn/guide/tpu.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
import os
import tensorflow_datasets as tfds
"""
Explanation: 使用 TPU
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://tensorflow.google.cn/guide/tpu" class=""><img src="https://tensorflow.google.cn/images/tf_logo_32px.png" class="">在 TensorFlow.org 上查看</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/tpu.ipynb" class=""><img src="https://tensorflow.google.cn/images/colab_logo_32px.png" class="">在 Google Colab 中运行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/tpu.ipynb" class=""><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png" class="">在 GitHub 上查看源代码</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/guide/tpu.ipynb" class=""><img src="https://tensorflow.google.cn/images/download_logo_32px.png" class="">下载笔记本</a></td>
</table>
目前,Keras 和 Google Colab 已提供对 Cloud TPU 的实验性支持。在运行此 Colab 笔记本之前,请在以下路径下检查笔记本设置,确保硬件加速器为 TPU:Runtime > Change runtime type > Hardware accelerator > TPU。
设置
End of explanation
"""
# Resolve the Colab-hosted TPU worker from the environment-provided address.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning
# of the program, before any TPU ops run.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
"""
Explanation: TPU 初始化
与运行用户 Python 程序的本地流程不同,TPU 通常位于 Cloud TPU 工作进程上。因此,需要完成一些初始化工作才能连接到远程集群并初始化 TPU。请注意,TPUClusterResolver 的 tpu 参数是一个仅适用于 Colab 的特殊地址。如果在 Google 计算引擎 (GCE) 上运行,应传入 CloudTPU 的名称。
注:必须将 TPU 初始化代码放在程序的开头位置。
End of explanation
"""
a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
with tf.device('/TPU:0'):
c = tf.matmul(a, b)
print("c device: ", c.device)
print(c)
"""
Explanation: 手动设备放置
初始化 TPU 后,您可以通过手动设备放置将计算分配给单个 TPU 设备。
End of explanation
"""
strategy = tf.distribute.experimental.TPUStrategy(resolver)
"""
Explanation: 分布策略
大多数情况下,用户会希望以数据并行方式在多个 TPU 上运行模型。分布策略是一个抽象概念,可用于在 CPU、GPU 或 TPU 上驱动模型。如果简单地交换分布策略,模型将在指定设备上运行。有关详细信息,请参阅分布策略指南。
首先,创建 TPUStrategy 对象。
End of explanation
"""
@tf.function
def matmul_fn(x, y):
    """Matrix-multiply ``x`` and ``y``; traced into a graph for replicated TPU runs."""
    return tf.matmul(x, y)

z = strategy.run(matmul_fn, args=(a, b))
print(z)
"""
Explanation: 要复制计算,以便在所有 TPU 核心中运行,可以直接将其传递给 strategy.run API。在下面的示例中,所有核心都会获得相同的输入 (a, b),并单独对每个核心执行矩阵乘法运算。输出是所有副本的值。
End of explanation
"""
def create_model():
    """Build the small MNIST convnet used throughout this guide.

    Returns an uncompiled ``tf.keras.Sequential`` model that maps
    28x28x1 images to 10 class logits.
    """
    layers = [
        tf.keras.layers.Conv2D(256, 3, activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.Conv2D(256, 3, activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10),
    ]
    return tf.keras.Sequential(layers)
"""
Explanation: TPU 上的分类
我们已经学习了基本概念,现在来看看具体的示例。本指南会演示如何使用分布策略 tf.distribute.experimental.TPUStrategy 来驱动 Cloud TPU 和训练 Keras 模型。
定义 Keras 模型
下面是使用 Keras 的 MNIST 模型的定义,与您可能在 CPU 或 GPU 上使用的定义相同。请注意,需要将创建 Keras 模型的代码放在 strategy.scope 内,这样才能在每个 TPU 设备上创建变量。代码的其他部分不必放在策略作用域内。
End of explanation
"""
def get_dataset(batch_size, is_training=True):
    """Load MNIST from GCS and return a batched ``tf.data`` pipeline.

    Training pipelines are shuffled and repeated indefinitely so every batch
    is full and no per-epoch partial batch has to be handled; evaluation
    pipelines make a single pass over the split.
    """
    which_split = 'train' if is_training else 'test'
    dataset, info = tfds.load(name='mnist', split=which_split, with_info=True,
                              as_supervised=True, try_gcs=True)

    def normalize(image, label):
        # Cast pixels to float and rescale into [0, 1].
        image = tf.cast(image, tf.float32)
        image /= 255.0
        return image, label

    dataset = dataset.map(normalize)

    if is_training:
        # An infinite shuffled stream avoids a ragged final batch per epoch,
        # so callers never need to rescale gradients for a short batch.
        dataset = dataset.shuffle(10000).repeat()

    return dataset.batch(batch_size)
"""
Explanation: 输入数据集
使用 Cloud TPU 时,有效利用 tf.data.Dataset API 很关键,因为如果提供数据的速度不够快,则无法利用 Cloud TPU。有关数据集性能的详细信息,请参阅输入流水线性能指南。
除了最简单的实验(使用 tf.data.Dataset.from_tensor_slices 或其他计算图中的数据)外,您都需要将数据集读取的所有数据文件存储在 Google Cloud Storage (GCS) 存储分区中。
对于大多数用例,建议将数据转换为 TFRecord 格式,并使用 tf.data.TFRecordDataset 进行读取。有关操作方法的详细信息,请参阅 TFRecord 和 tf.Example 教程。不过,这并非硬性要求,如果您愿意,可以使用其他数据集读取器(FixedLengthRecordDataset 或 TextLineDataset)。
使用 tf.data.Dataset.cache 可以将小型数据集完整地加载到内存中。
无论使用哪一种数据格式,我们都强烈建议使用大文件(100MB 左右)。在这种网络化环境下,这一点尤其重要,因为打开文件的开销非常高。
这里应使用 tensorflow_datasets 模块获取 MNIST 训练数据的副本。请注意,代码中已指定 try_gcs 使用公共 GCS 存储分区中提供的副本。如果不这样指定,TPU 将无法访问下载的数据。
End of explanation
"""
# Build and compile the model under the TPU strategy scope so its variables
# are created on (and mirrored across) the TPU replicas.
with strategy.scope():
    model = create_model()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['sparse_categorical_accuracy'])

# MNIST has 60k train / 10k test examples; derive whole-epoch step counts.
batch_size = 200
steps_per_epoch = 60000 // batch_size
validation_steps = 10000 // batch_size

train_dataset = get_dataset(batch_size, is_training=True)
test_dataset = get_dataset(batch_size, is_training=False)

model.fit(train_dataset,
          epochs=5,
          steps_per_epoch=steps_per_epoch,
          validation_data=test_dataset,
          validation_steps=validation_steps)
"""
Explanation: 使用 Keras 高级别 API 训练模型
您可以只使用 Keras fit/compile API 训练模型。下面的示例并非特定于 TPU,如果您有多个 GPU,并且使用 MirroredStrategy 而不是 TPUStrategy,则可以编写与下面的示例相同的代码。要了解更多信息,请查阅 Keras 分布训练教程。
End of explanation
"""
# Same training run, but batching several steps into one tf.function call to
# cut Python dispatch overhead (~50% throughput gain per the guide text).
with strategy.scope():
    model = create_model()
    model.compile(optimizer='adam',
                  # Anything between 2 and `steps_per_epoch` could help here.
                  experimental_steps_per_execution = 50,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['sparse_categorical_accuracy'])

model.fit(train_dataset,
          epochs=5,
          steps_per_epoch=steps_per_epoch,
          validation_data=test_dataset,
          validation_steps=validation_steps)
"""
Explanation: 为了减少 Python 开销,同时最大限度提高 TPU 的性能,请尝试使用 Model.compile 的 experimental experimental_steps_per_execution 参数。在本例中,它可以将吞吐量提升约 50%:
End of explanation
"""
# Create the model, optimizer and metrics inside strategy scope, so that the
# variables can be mirrored on each device.
with strategy.scope():
    model = create_model()
    optimizer = tf.keras.optimizers.Adam()
    training_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
    training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        'training_accuracy', dtype=tf.float32)

# Calculate per replica batch size, and distribute the datasets on each TPU
# worker.
per_replica_batch_size = batch_size // strategy.num_replicas_in_sync

train_dataset = strategy.experimental_distribute_datasets_from_function(
    lambda _: get_dataset(per_replica_batch_size, is_training=True))

@tf.function
def train_step(iterator):
    """Pull one distributed batch from `iterator` and run a training step on every replica."""

    def step_fn(inputs):
        """The computation to run on each TPU device."""
        images, labels = inputs
        with tf.GradientTape() as tape:
            logits = model(images, training=True)
            loss = tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True)
            # Scale by the *global* batch size so gradients sum correctly
            # across replicas.
            loss = tf.nn.compute_average_loss(loss, global_batch_size=batch_size)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))
        # Undo the global-batch scaling so the reported metric is a plain mean.
        training_loss.update_state(loss * strategy.num_replicas_in_sync)
        training_accuracy.update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator),))
"""
Explanation: 使用自定义训练循环训练模型
您还可以直接使用 tf.function 和 tf.distribute API 创建和训练模型。strategy.experimental_distribute_datasets_from_function API 用于从给定数据集函数分布数据集。请注意,在本例中,传递给数据集的批次大小是每个副本的批次大小,而非全局批次大小。要了解更多信息,请查阅使用 tf.distribute.Strategy 进行自定义训练教程。
首先,创建模型、数据集和 tf.function。
End of explanation
"""
steps_per_eval = 10000 // batch_size  # evaluation steps (one pass over the test split)

# Drive the tf.function training step epoch by epoch, resetting the streaming
# metrics after each report so numbers are per-epoch, not cumulative.
train_iterator = iter(train_dataset)
for epoch in range(5):
    print('Epoch: {}/5'.format(epoch))

    for step in range(steps_per_epoch):
        train_step(train_iterator)
    print('Current step: {}, training loss: {}, accuracy: {}%'.format(
        optimizer.iterations.numpy(),
        round(float(training_loss.result()), 4),
        round(float(training_accuracy.result()) * 100, 2)))
    training_loss.reset_states()
    training_accuracy.reset_states()
"""
Explanation: 然后运行训练循环。
End of explanation
"""
@tf.function
def train_multiple_steps(iterator, steps):
    """Run `steps` training steps inside a single traced function.

    The `tf.range` loop is converted by AutoGraph into a `tf.while_loop` on
    the TPU worker, amortizing Python dispatch overhead across many steps.
    """

    def step_fn(inputs):
        """The computation to run on each TPU device."""
        images, labels = inputs
        with tf.GradientTape() as tape:
            logits = model(images, training=True)
            loss = tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True)
            loss = tf.nn.compute_average_loss(loss, global_batch_size=batch_size)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))
        training_loss.update_state(loss * strategy.num_replicas_in_sync)
        training_accuracy.update_state(labels, logits)

    for _ in tf.range(steps):
        strategy.run(step_fn, args=(next(iterator),))

# Convert `steps_per_epoch` to `tf.Tensor` so the `tf.function` won't get
# retraced if the value changes.
train_multiple_steps(train_iterator, tf.convert_to_tensor(steps_per_epoch))

print('Current step: {}, training loss: {}, accuracy: {}%'.format(
    optimizer.iterations.numpy(),
    round(float(training_loss.result()), 4),
    round(float(training_accuracy.result()) * 100, 2)))
"""
Explanation: 在 tf.function 中利用多步法提高性能
在 tf.function 中运行多步可以提高性能。使用 tf.range 将 strategy.run 调用包装到 tf.function 内即可实现此目的。在 TPU 工作进程上,AutoGraph 会将其转换为 tf.while_loop。
在 tf.function 中,虽然多步法的性能更高,但是与单步法相比,可谓各有利弊。在 tf.function 中运行多步不够灵活,您无法在步骤中以 Eager 模式执行运算,也不能运行任意 Python 代码。
End of explanation
"""
|
sarathid/Learning | Deep_learning_ND/Week 1/dlnd-your-first-network/DLND-your-first-network/old_files/dlnd-your-first-neural-network_TRY.ipynb | gpl-3.0 | %matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
End of explanation
"""
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
"""
Explanation: Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
End of explanation
"""
rides[:24*10].plot(x='dteday', y='cnt')
"""
Explanation: Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
End of explanation
"""
# One-hot encode the categorical columns and append them to the frame.
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)

# Drop the now-redundant raw categorical columns plus fields we won't model
# ('atemp' duplicates 'temp'; 'workingday' is implied by weekday dummies).
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
"""
Explanation: Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().
End of explanation
"""
# Standardize each continuous variable to zero mean / unit variance.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
    mean, std = data[each].mean(), data[each].std()
    scaled_features[each] = [mean, std]  # [mean, std] needed to un-scale predictions
    data.loc[:, each] = (data[each] - mean)/std
"""
Explanation: Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
End of explanation
"""
# Save the last 21 days (hourly rows, so 21*24 records) as the held-out
# test set; everything earlier is available for training/validation.
test_data = data[-21*24:]
data = data[:-21*24]

# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
"""
Explanation: Splitting the data into training, testing, and validation sets
We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
End of explanation
"""
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
"""
Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
End of explanation
"""
class NeuralNetwork(object):
    """Two-layer feed-forward network: sigmoid hidden layer, linear output.

    Trained one record at a time with plain gradient descent; used for the
    bike-rental regression task.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Layer sizes.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Weights drawn from N(0, 1/sqrt(n)); stored as (to_layer, from_layer)
        # matrices so a forward pass is a plain matrix product.
        self.weights_input_to_hidden = np.random.normal(
            0.0, self.hidden_nodes ** -0.5,
            (self.hidden_nodes, self.input_nodes))
        self.weights_hidden_to_output = np.random.normal(
            0.0, self.output_nodes ** -0.5,
            (self.output_nodes, self.hidden_nodes))
        self.lr = learning_rate

        # Hidden-layer activation: the logistic sigmoid.
        self.activation_function = lambda x: 1.0 / (1 + np.exp(-x))

    def sigmoid(self, x):
        """Logistic function (public helper mirroring `activation_function`)."""
        return 1 / (1 + np.exp(-x))

    def train(self, inputs_list, targets_list):
        """Run one forward/backward pass for a single record and update weights."""
        # Shape the record into column vectors.
        x = np.array(inputs_list, ndmin=2).T
        y = np.array(targets_list, ndmin=2).T

        # --- forward pass ---
        hidden_act = self.activation_function(self.weights_input_to_hidden @ x)
        # The output unit is linear (f(z) = z), since this is a regression.
        prediction = self.weights_hidden_to_output @ hidden_act

        # --- backward pass ---
        out_err = y - prediction                                # output-layer error
        hid_err = self.weights_hidden_to_output.T @ out_err     # error pushed back to hidden layer
        hid_grad = hidden_act * (1 - hidden_act)                # sigmoid derivative

        # Gradient-descent updates; the linear output's derivative is 1.
        self.weights_hidden_to_output += self.lr * (out_err @ hidden_act.T)
        self.weights_input_to_hidden += self.lr * ((hid_err * hid_grad) @ x.T)

    def run(self, inputs_list):
        """Forward pass only; return the network's prediction for the record."""
        x = np.array(inputs_list, ndmin=2).T
        hidden_act = self.activation_function(self.weights_input_to_hidden @ x)
        return self.weights_hidden_to_output @ hidden_act
def MSE(y, Y):
    """Mean squared error between predictions ``y`` and targets ``Y``."""
    diff = y - Y
    return np.mean(diff * diff)
"""
Explanation: Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.
Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.
2. Implement the forward pass in the train method.
3. Implement the backpropagation algorithm in the train method, including calculating the output error.
4. Implement the forward pass in the run method.
End of explanation
"""
import sys

### Set the hyperparameters here ###
epochs = 2000
learning_rate = 0.03
hidden_nodes = 10
output_nodes = 1

N_i = train_features.shape[1]  # input size = number of feature columns
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train':[], 'validation':[]}
for e in range(epochs):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    # NOTE(review): DataFrame.ix is deprecated; .loc is the modern accessor.
    for record, target in zip(train_features.ix[batch].values,
                              train_targets.ix[batch]['cnt']):
        network.train(record, target)

    # Printing out the training progress (overwrites one console line via \r)
    train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
    sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)

# Plot learning curves to judge over/underfitting.
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
# plt.ylim(ymax=0.5)
"""
Explanation: Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
Choose the number of epochs
This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting.
Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
End of explanation
"""
fig, ax = plt.subplots(figsize=(8,4))

# Un-standardize predictions back to raw rider counts using the saved scaling.
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()

# Label the x axis with dates (one tick per day, placed at noon).
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
"""
Explanation: Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
End of explanation
"""
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
    """Project grading tests: data loading plus exact-value backprop checks
    against the module-level fixtures (inputs, targets, test_w_i_h, test_w_h_o)."""

    ##########
    # Unit tests for data loading
    ##########

    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########

    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_train(self):
        # Test that weights are updated correctly on training
        # (expected arrays are one hand-computed gradient step at lr=0.5)
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328, -0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, 0.39775194, -0.29887597],
                                              [-0.20185996, 0.50074398, 0.19962801]])))

    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
# Collect the tests defined above and run them with a text-mode runner.
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
"""
Explanation: Thinking about your results
Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
Note: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
Your answer below
Unit tests
Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project.
End of explanation
"""
|
jamesorr/CO2SYS-MATLAB | notebooks/CO2SYS-Matlab_derivnum.ipynb | mit | %load_ext oct2py.ipython
"""
Explanation: Calculate sensitivities with the derivnum add-on for CO2SYS-Matlab
James Orr<br>
<img align="left" width="50%" src="http://www.lsce.ipsl.fr/Css/img/banniere_LSCE_75.png"><br><br>
LSCE/IPSL, CEA-CNRS-UVSQ, Gif-sur-Yvette, France
27 February 2018 <br><br>
updated: 29 June 2020
Abstract: This notebook shows how to use the new derivnum add-on of CO2SYS-Matlab to calculate sensitivities, i.e. partial derivatives that express rates of change of computed CO2 system variables (e.g., [H+], $p$CO$_2$, $\Omega_A$) per change in the selected input variable (e.g., $A_\text{T}$, $C_\text{T}$, T, S, $K_1$, $K_2$). It uses CO2SYS-Matlab in octave, GNU's clone of Matlab. You can either inspect the HTML version of this file or execute its commands interactively in your browser. But for the latter, you'll need to install jupyter notebook, octave, and oct2py, which includes the python-octave interface called octavemagic. Fortunately, that installation is very easy (see below).
Table of Contents:
1. Basics (install octave & oct2py and load octave)
2. Compute sensitivities with `derivnum` add-on for CO2SYS (use ALK-DIC input pair)
3. same but for pH-ALK
4. same but for pCO2-DIC
5. same but for pCO2-ALK
6. same but for pCO2-pH
1. Basics
Run interactively
If you are visualizing this after clicking on the link to this file on github, you are seeing the HTML version of a jupyter notebook. Alternatively, you may run cells interactively and modify them if you have jupyter notebook installed on your machine. To install that software, just download the anaconda open software installer for your computing platform (Windows, OS X, or Linux) from https://www.anaconda.com/ and then follow the easy install instructions at
https://docs.anaconda.com/anaconda/install/
Then just download this jupyter notebook file as well as the 3 routines in the src directory (CO2SYS.m, errors.m, and derivnum.m). Afterwards, you'll only need to install octave and oct2py using the 1-line command in the following section.
Install octavemagic
To install the octavemagic funtionality, we must install oct2py, with the following command at the Unix prompt:
conda install -c conda-forge octave oct2py
That command also installs octave. Then launch the notebook as usual with the following command:
jupyter notebook
A new window or tab should then appear in your browser, showing the files in the directory from where you launched the above command. Then just click on one of the .ipynb files, such as this one.
Once the notebook file appears in your browser, move to any of its cells with your mouse. Run a cell by clicking on it and hitting Ctrl-Return. Alternatively, type Shift-Return to run a cell and then move to the next one. More information on all the interactive commands is available in the Jupyter Notebook Quick Start Guide: http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/execute.html
At the top of the notebook, you'll see a number of Tabs (File, Edit, View, Insert, ...). Those tabs provide commands that will allow you to do whatever you like. Under the Help tab you'll find keyboard shortcuts for commands. Alternatively, a cheat sheet for short cuts to commands within jupyter notebook is available at https://zenodo.org/record/44973/files/ipynb-cheat-sheet.pdf . Or use the command palette after typing Ctrl-Shift-P.
Documentation for octavemagic
Details on using octavemagic are here: https://ipython.org/ipython-doc/2/config/extensions/octavemagic.html
Load octave magic function
Because octavemagic is now in conda's oct2py module, it is loaded with a slight modification to what is given on the above web page, i.e., now with the command below
End of explanation
"""
# Show all line (%) and cell (%%) magics available in this kernel.
%lsmagic
"""
Explanation: List all available magics
End of explanation
"""
%%octave
# NOTE(review): machine-specific path — adjust to wherever CO2SYS.m,
# derivnum.m, and errors.m live on your system.
addpath ("~/Software/MATLAB/CO2SYS-MATLAB/src")
"""
Explanation: Specify the directory where you have put the Matlab routines CO2SYS.m, errors.m, and derivnum.m.
End of explanation
"""
%%octave
# Display the built-in documentation for the derivnum routine.
help derivnum
"""
Explanation: Note: any notebook cell whose 1st line is %%octave means that all subsequent lines in that cell are in octave, not python
Get the documentation for using the derivnum routine
End of explanation
"""
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables (pair 1: total alkalinity and dissolved inorganic carbon):
PAR1 = 2300; % ALK
PAR2 = 2000; % DIC
PAR1TYPE = 1; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 2; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissolved inorganic phosphorus (Pt)
# SI = 0; % Total dissolved inorganic silicon (Sit)
# Input Parameters (equilibrium-constant and scale choices):
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 10; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15=Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
"""
Explanation: Hint:
double click on left of box above to hide above output
single click to show it again
2. Compute sensitivities with derivnum add-on for CO2SYS-Matlab ($A_\text{T}$-$C_\text{T}$ pair)
2.1 Specify input variables and choices
End of explanation
"""
%%octave
# Numerical partial derivatives of the computed CO2-system variables with
# respect to PAR1 (here ALK). Columns 1:9 are printed below; columns 10:18
# (printed in the following cells) correspond to the "output" conditions.
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
                SI, PO4,...
                pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted): header names, units, then the derivative values
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 2.2 Partial derivatives with respect to par1 (specified above as ALK)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 2.2 Partial derivatives with respect to par2 (specified above as DIC)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 2.3 Partial derivatives with respect to Temperature
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 2.4 Partial derivatives with respect to Salinity
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('K1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 2.5 Partial derivatives with respect to $K_1$
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('K1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('Kspa', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 2.6 Partial derivatives with respect to $K_A$ (solubility product for aragonite)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('Kspa', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables (pair 2: pH and total alkalinity):
PAR1 = 8.1; % pH
PAR2 = 2300; % ALK
PAR1TYPE = 3; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 1; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
#TEMPOUT = TEMPIN;
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissolved inorganic phosphorus (Pt)
# Input Parameters (equilibrium-constant and scale choices):
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 15; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15=Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
"""
Explanation: 3. Compute sensitivities with derivnum for a 2nd input pair (pH-$A_\text{T}$)
3.1 Specify input variables and choices
End of explanation
"""
%%octave
# Derivatives with respect to PAR1 (here pH); per the notebook text these are
# reported as derivatives with respect to [H+]. Columns 1:9 = input conditions.
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
                SI, PO4,...
                pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted): header names, units, then the derivative values
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 3.2 Partial derivatives with respect to H+ (since par1 is pH)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 3.3 Partial derivatives with respect to par2 (specified above as ALK)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 3.4 Partial derivatives with respect to T
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables (pair 3: pCO2 and dissolved inorganic carbon):
PAR1 = 400; % pCO2
PAR2 = 2300; % DIC
PAR1TYPE = 4; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 2; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissolved inorganic phosphorus (Pt)
# Input Parameters (equilibrium-constant and scale choices):
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 15; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15=Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
"""
Explanation: 4. Compute sensitivities with derivnum for a 3rd input pair (pCO2-$C_\text{T}$)
4.1 Specify input variables and choices
End of explanation
"""
%%octave
# Derivatives with respect to PAR1 (here pCO2). Columns 1:9 = input conditions.
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, ...
                PRESIN, PRESOUT, SI, PO4,...
                pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted): header names, units, then the derivative values
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 4.2 Partial derivatives with respect to par1 (specified as pCO2 above)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 4.3 Partial derivatives with respect to par2 (specified above as DIC)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 4.4 Partial derivatives with respect to T
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 4.5 Partial derivatives with respect to S
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables (pair 4: pCO2 and total alkalinity):
PAR1 = 400; % pCO2
PAR2 = 2300; % ALK
PAR1TYPE = 4; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 1; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissolved inorganic phosphorus (Pt)
# Input Parameters (equilibrium-constant and scale choices):
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 10; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15=Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
"""
Explanation: 5. Compute sensitivities with derivnum for a 4th input pair (pCO2-$A_\text{T}$)
5.1 Specify input variables and choices
End of explanation
"""
%%octave
# Derivatives with respect to PAR1 (here pCO2). Columns 1:9 = input conditions.
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, ...
                PRESIN, PRESOUT, SI, PO4,...
                pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted): header names, units, then the derivative values
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 5.2 Partial derivatives with respect to par1 (specified as pCO2 above)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 5.3 Partial derivatives with respect to par2 (specified above as ALK)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 5.4 Partial derivatives with respect to T
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 5.5 Partial derivatives with respect to S
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables (pair 5: pCO2 and pH):
PAR1 = 400; % pCO2
PAR2 = 8.0; % pH
PAR1TYPE = 4; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 3; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissolved inorganic phosphorus (Pt)
# Input Parameters (equilibrium-constant and scale choices):
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 10; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15=Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
"""
Explanation: 6. Compute sensitivities with derivnum for a 5th input pair (pCO2-pH)
6.1 Specify input variables and choices
End of explanation
"""
%%octave
# Derivatives with respect to PAR1 (here pCO2). Columns 1:9 = input conditions.
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, ...
                PRESIN, PRESOUT, SI, PO4,...
                pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted): header names, units, then the derivative values
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 6.2 Partial derivatives with respect to par1 (specified as pCO2 above)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 6.3 Partial derivatives with respect to par2 (specified above as pH)
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 6.4 Partial derivatives with respect to T
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
"""
Explanation: 6.5 Partial derivatives with respect to S
End of explanation
"""
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
"""
Explanation: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
End of explanation
"""
|
phungkh/phys202-2015-work | days/day11/Interpolation.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
"""
Explanation: Interpolation
Learning Objective: Learn to interpolate 1d and 2d datasets of structured and unstructured points using SciPy.
End of explanation
"""
x = np.linspace(0,4*np.pi,10)
x
"""
Explanation: Overview
We have already seen how to evaluate a Python function at a set of numerical points:
$$ f(x) \rightarrow f_i = f(x_i) $$
Here is an array of points:
End of explanation
"""
# Evaluate sin at the sample points and plot them to show that the discrete
# array is only an approximation of the continuous function.
f = np.sin(x)
f
plt.plot(x, f, marker='o')
plt.xlabel('x')
plt.ylabel('f(x)');
"""
Explanation: This creates a new array of points that are the values of $\sin(x_i)$ at each point $x_i$:
End of explanation
"""
from scipy.interpolate import interp1d
"""
Explanation: This plot shows that the points in this numerical array are an approximation to the actual function as they don't have the function's value at all possible points. In this case we know the actual function ($\sin(x)$). What if we only know the value of the function at a limited set of points, and don't know the analytical form of the function itself? This is common when the data points come from a set of measurements.
Interpolation is a numerical technique that enables you to construct an approximation of the actual function from a set of points:
$$ \{x_i, f_i\} \rightarrow f(x) $$
It is important to note that unlike curve fitting or regression, interpolation does not allow you to incorporate a statistical model into the approximation. Because of this, interpolation has limitations:
It cannot accurately construct the function's approximation outside the limits of the original points.
It cannot tell you the analytical form of the underlying function.
Once you have performed interpolation you can:
Evaluate the function at other points not in the original dataset.
Use the function in other calculations that require an actual function.
Compute numerical derivatives or integrals.
Plot the approximate function on a finer grid than the original dataset.
Warning:
The different functions in SciPy work with a range of different 1d and 2d arrays. To help you keep all of that straight, I will use lowercase variables for 1d arrays (x, y) and uppercase variables (X,Y) for 2d arrays.
1d data
We begin with a 1d interpolation example with regularly spaced data. The function we will use is interp1d:
End of explanation
"""
x = np.linspace(0,4*np.pi,10) # only use 10 points to emphasize this is an approx
f = np.sin(x)
"""
Explanation: Let's create the numerical data we will use to build our interpolation.
End of explanation
"""
sin_approx = interp1d(x, f, kind='cubic')
"""
Explanation: To create our approximate function, we call interp1d as follows, with the numerical data. Options for the kind argument includes:
linear: draw a straight line between initial points.
nearest: return the value of the function of the nearest point.
slinear, quadratic, cubic: use a spline (particular kinds of piecewise polynomial of a given order.
The most common case you will want to use is cubic spline (try other options):
End of explanation
"""
newx = np.linspace(0,4*np.pi,100)
newf = sin_approx(newx)
"""
Explanation: The sin_approx variable that interp1d returns is a callable object that can be used to compute the approximate function at other points. Compute the approximate function on a fine grid:
End of explanation
"""
plt.plot(x, f, marker='o', linestyle='', label='original data')
plt.plot(newx, newf, marker='.', label='interpolated');
plt.legend();
plt.xlabel('x')
plt.ylabel('f(x)');
"""
Explanation: Plot the original data points, along with the approximate interpolated values. It is quite amazing to see how the interpolation has done a good job of reconstructing the actual function with relatively few points.
End of explanation
"""
plt.plot(newx, np.abs(np.sin(newx)-sin_approx(newx)))
plt.xlabel('x')
plt.ylabel('Absolute error');
"""
Explanation: Let's look at the absolute error between the actual function and the approximate interpolated function:
End of explanation
"""
x = 4*np.pi*np.random.rand(15)
f = np.sin(x)
sin_approx = interp1d(x, f, kind='cubic')
# We have to be careful about not interpolating outside the range
newx = np.linspace(np.min(x), np.max(x),100)
newf = sin_approx(newx)
plt.plot(x, f, marker='o', linestyle='', label='original data')
plt.plot(newx, newf, marker='.', label='interpolated');
plt.legend();
plt.xlabel('x')
plt.ylabel('f(x)');
plt.plot(newx, np.abs(np.sin(newx)-sin_approx(newx)))
plt.xlabel('x')
plt.ylabel('Absolute error');
"""
Explanation: 1d non-regular data
It is also possible to use interp1d when the x data is not regularly spaced. To show this, let's repeat the above analysis with randomly distributed data in the range $[0,4\pi]$. Everything else is the same.
End of explanation
"""
from scipy.interpolate import interp2d
"""
Explanation: Notice how the absolute error is larger in the intervals where there are no points.
2d structured
For the 2d case we want to construct a scalar function of two variables, given
$$ \{x_i, y_i, f_i\} \rightarrow f(x,y) $$
For now, we will assume that the points ${x_i,y_i}$ are on a structured grid of points. This case is covered by the interp2d function:
End of explanation
"""
def wave2d(x, y):
    """Return sin(2*pi*x) * sin(3*pi*y), a 2d standing-wave test surface.

    x, y: scalars or NumPy arrays of matching (broadcastable) shape
    """
    x_wave = np.sin(2 * np.pi * x)
    y_wave = np.sin(3 * np.pi * y)
    return x_wave * y_wave
"""
Explanation: Here is the actual function we will use the generate our original dataset:
End of explanation
"""
x = np.linspace(0.0, 1.0, 10)
y = np.linspace(0.0, 1.0, 10)
"""
Explanation: Build 1d arrays to use as the structured grid:
End of explanation
"""
X, Y = np.meshgrid(x, y)
Z = wave2d(X, Y)
"""
Explanation: Build 2d arrays to use in computing the function on the grid points:
End of explanation
"""
plt.pcolor(X, Y, Z)
plt.colorbar();
plt.scatter(X, Y);
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
"""
Explanation: Here is a scatter plot of the points overlayed with the value of the function at those points:
End of explanation
"""
wave2d_approx = interp2d(X, Y, Z, kind='cubic')
"""
Explanation: You can see in this plot that the function is not smooth as we don't have its value on a fine grid.
Now let's compute the interpolated function using interp2d. Notice how we are passing 2d arrays to this function:
End of explanation
"""
xnew = np.linspace(0.0, 1.0, 40)
ynew = np.linspace(0.0, 1.0, 40)
Xnew, Ynew = np.meshgrid(xnew, ynew) # We will use these in the scatter plot below
Fnew = wave2d_approx(xnew, ynew) # The interpolating function automatically creates the meshgrid!
Fnew.shape
"""
Explanation: Compute the interpolated function on a fine grid:
End of explanation
"""
plt.pcolor(xnew, ynew, Fnew);
plt.colorbar();
plt.scatter(X, Y, label='original points')
plt.scatter(Xnew, Ynew, marker='.', color='green', label='interpolated points')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
plt.legend(bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.);
"""
Explanation: Plot the original coarse grid of points, along with the interpolated function values on a fine grid:
End of explanation
"""
from scipy.interpolate import griddata
"""
Explanation: Notice how the interpolated values (green points) are now smooth and continuous. The amazing thing is that the interpolation algorithm doesn't know anything about the actual function. It creates this nice approximation using only the original coarse grid (blue points).
2d unstructured
It is also possible to perform interpolation when the original data is not on a regular grid. For this, we will use the griddata function:
End of explanation
"""
x = np.random.rand(100)
y = np.random.rand(100)
"""
Explanation: There is an important difference between griddata and the interp1d/interp2d:
interp1d and interp2d return callable Python objects (functions).
griddata returns the interpolated function evaluated on a finer grid.
This means that you have to pass griddata an array that has the finer grid points to be used. Here is the coarse unstructured grid we will use:
End of explanation
"""
f = wave2d(x, y)
"""
Explanation: Notice how we pass these 1d arrays to our function and don't use meshgrid:
End of explanation
"""
plt.scatter(x, y);
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
"""
Explanation: It is clear that our grid is very unstructured:
End of explanation
"""
xnew = np.linspace(x.min(), x.max(), 40)
ynew = np.linspace(y.min(), y.max(), 40)
Xnew, Ynew = np.meshgrid(xnew, ynew)
Xnew.shape, Ynew.shape
Fnew = griddata((x,y), f, (Xnew, Ynew), method='cubic', fill_value=0.0)
Fnew.shape
plt.pcolor(Xnew, Ynew, Fnew, label="points")
plt.colorbar()
plt.scatter(x, y, label='original points')
plt.scatter(Xnew, Ynew, marker='.', color='green', label='interpolated points')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
plt.legend(bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.);
"""
Explanation: To use griddata we need to compute the final (structured) grid we want to compute the interpolated function on:
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.13.1/examples/notebooks/generated/autoregressive_distributed_lag.ipynb | bsd-3-clause | import numpy as np
import pandas as pd
import seaborn as sns
sns.set_style("darkgrid")
sns.mpl.rc("figure", figsize=(16, 6))
sns.mpl.rc("font", size=14)
"""
Explanation: Autoregressive Distributed Lag (ARDL) models
ARDL Models
Autoregressive Distributed Lag (ARDL) models extend Autoregressive models with lags of explanatory variables. While ARDL models are technically AR-X models, the key difference is that ARDL models focus on the exogenous variables and selecting the correct lag structure from both the endogenous variable and the exogenous variables. ARDL models are also closely related to Vector Autoregressions, and a single ARDL is effectively one row of a VAR. The key distinction is that an ARDL assumes that the exogenous variables are exogenous in the sense that it is not necessary to include the endogenous variable as a predictor of the exogenous variables.
The full specification of ARDL models is
$$
Y_t = \underset{\text{Constant and Trend}}{\underbrace{\delta_0 + \delta_1 t + \ldots + \delta_k t^k}}
+ \underset{\text{Seasonal}}{\underbrace{\sum_{i=0}^{s-1} \gamma_i S_i}}
+ \underset{\text{Autoregressive}}{\underbrace{\sum_{p=1}^P \phi_p Y_{t-p}}}
+ \underset{\text{Distributed Lag}}{\underbrace{\sum_{k=1}^M \sum_{j=0}^{Q_k} \beta_{k,j} X_{k, t-j}}}
+ \underset{\text{Fixed}}{\underbrace{Z_t \Gamma}} + \epsilon_t
$$
The terms in the model are:
$\delta_i$: constant and deterministic time regressors. Set using trend.
$S_i$ are seasonal dummies which are included if seasonal=True.
$X_{k,t-j}$ are the exogenous regressors. There are a number of formats that can be used to specify which lags are included. Note that the included lag lengths do not need to be the same. If causal=True, then the lags start with lag 1. Otherwise lags begin with 0 so that the model includes the contemporaneous relationship between $Y_t$ and $X_t$.
$Z_t$ are any other fixed regressors that are not part of the distributed lag specification. In practice these regressors may be included when they do not contribute to the long-run relationship between $Y_t$ and the vector of exogenous variables $X_t$.
${\epsilon_t}$ is assumed to be a White Noise process
End of explanation
"""
from statsmodels.datasets.danish_data import load
from statsmodels.tsa.api import ARDL
from statsmodels.tsa.ardl import ardl_select_order
data = load().data
data = data[["lrm", "lry", "ibo", "ide"]]
data.tail()
"""
Explanation: Data
This notebook makes use of money demand data from Denmark, as first used in S. Johansen and K. Juselius (1990). The key variables are:
lrm: Log of real money measured using M2
lry: Log of real income
ibo: Interest rate on bonds
ide: Interest rate of bank deposits
The standard model uses lrm as the dependent variable and the other three as exogenous drivers.
Johansen, S. and Juselius, K. (1990), Maximum Likelihood Estimation and Inference on Cointegration – with Applications to the Demand for Money, Oxford Bulletin of Economics and Statistics, 52, 2, 169–210.
We start by loading the data and examining it.
End of explanation
"""
_ = (data - data.mean()).plot()
"""
Explanation: We plot the demeaned data so that all series appear on the same scale. The lrm series appears to be non-stationary, as does lry. The stationarity of the other two is less obvious.
End of explanation
"""
sel_res = ardl_select_order(
data.lrm, 3, data[["lry", "ibo", "ide"]], 3, ic="aic", trend="c"
)
print(f"The optimal order is: {sel_res.model.ardl_order}")
"""
Explanation: Model Selection
ardl_select_order can be used to automatically select the order. Here we use the minimum AIC among all models that consider up to 3 lags of the endogenous variable and 3 lags of each exogenous variable. trend="c" indicates that a constant should be included in the model.
End of explanation
"""
res = sel_res.model.fit()
res.summary()
"""
Explanation: The optimal order is returned as the number of lags of the endogenous variable followed by each of the exogenous regressors. The attribute model on sel_res contains the model ARDL specification which can be used to call fit. Here we look at the summary where the L# indicates that lag length (e.g., L0 is no lag, i.e., $X_{k,t}$, L2 is 2 lags, i.e., $X_{k,t-2}$).
End of explanation
"""
sel_res = ardl_select_order(
data.lrm, 3, data[["lry", "ibo", "ide"]], 3, ic="bic", trend="c", glob=True
)
sel_res.model.ardl_order
"""
Explanation: Global searches
The selection criteria can be switched the BIC which chooses a smaller model. Here we also use the glob=True option to perform a global search which considers models with any subset of lags up to the maximum lag allowed (3 here). This option lets the model selection choose non-contiguous lag specifications.
End of explanation
"""
sel_res.model.ar_lags
sel_res.model.dl_lags
"""
Explanation: While the ardl_order shows the largest included lag of each variable, ar_lags and dl_lags show the specific lags included. The AR component is regular in the sense that all 3 lags are included. The DL component is not since ibo selects only lags 0 and 3 and ide selects only lags 2.
End of explanation
"""
for i, val in enumerate(sel_res.bic.head(10)):
print(f"{i+1}: {val}")
"""
Explanation: We can take a look at the best performing models according to the BIC which are stored in the bic property. ibo at lags 0 and 3 is consistently selected, as is ide at either lag 2 or 3, and lry at lag 0. The selected AR lags vary more, although all of the best specifications select some.
End of explanation
"""
res = ARDL(
data.lrm, 2, data[["lry", "ibo", "ide"]], {"lry": 1, "ibo": 2, "ide": 3}, trend="c"
).fit()
res.summary()
"""
Explanation: Direct Parameterization
ARDL models can be directly specified using the ARDL class. The first argument is the endogenous variable ($Y_t$). The second is the AR lags. It can be a constant, in which case lags 1, 2, ..., $P$ are included, or a list of specific lags indices to include (e.g., [1, 4]). The third are the exogenous variables, and the fourth is the list of lags to include. This can be one of
An int: Include lags 0, 1, ..., Q
A dict with column names when exog is a DataFrame or numeric column locations when exog is a NumPy array (e.g., {0:1, 1: 2, 2:3}, would match the specification below if a NumPy array was used.
A dict with column names (DataFrames) or integers (NumPy arrays) that contains a list of specific lags to include (e.g., {"lry":[0,2], "ibo":[1,2]}).
The specification below matches that model selected by ardl_select_order.
End of explanation
"""
y = np.asarray(data.lrm)
x = np.asarray(data[["lry", "ibo", "ide"]])
res = ARDL(y, 2, x, {0: 1, 1: 2, 2: 3}, trend="c").fit()
res.summary()
"""
Explanation: NumPy Data
Below we see how the specification of ARDL models differs when using NumPy arrays. The key difference is that the keys in the dictionary are now integers which indicate the column of x to use. This model is identical to the previously fit model and all key values match exactly (e.g., Log Likelihood).
End of explanation
"""
res = ARDL(
data.lrm,
2,
data[["lry", "ibo", "ide"]],
{"lry": 1, "ibo": 2, "ide": 3},
trend="c",
causal=True,
).fit()
res.summary()
"""
Explanation: Causal models
Using the causal=True flag eliminates lag 0 from the DL components, so that all variables included in the model are known at time $t-1$ when modeling $Y_t$.
End of explanation
"""
from statsmodels.tsa.api import UECM
sel_res = ardl_select_order(
data.lrm, 3, data[["lry", "ibo", "ide"]], 3, ic="aic", trend="c"
)
ecm = UECM.from_ardl(sel_res.model)
ecm_res = ecm.fit()
ecm_res.summary()
"""
Explanation: Unconstrained Error Correction Models (UECM)
Unconstrained Error Correction Models reparameterize ARDL model to focus on the long-run component of a time series. The reparameterized model is
$$
\Delta Y_t = \underset{\text{Constant and Trend}}{\underbrace{\delta_0 + \delta_1 t + \ldots + \delta_k t^k}}
+ \underset{\text{Seasonal}}{\underbrace{\sum_{i=0}^{s-1} \gamma_i S_i}}
+ \underset{\text{Long-Run}}{\underbrace{\lambda_0 Y_{t-1} + \sum_{b=1}^M \lambda_i X_{b,t-1}}}
+ \underset{\text{Autoregressive}}{\underbrace{\sum_{p=1}^P \phi_p \Delta Y_{t-p}}}
+ \underset{\text{Distributed Lag}}{\underbrace{\sum_{k=1}^M \sum_{j=0}^{Q_k} \beta_{k,j} \Delta X_{k, t-j}}}
+ \underset{\text{Fixed}}{\underbrace{Z_t \Gamma}} + \epsilon_t
$$
Most of the components are the same. The key differences are:
The levels only enter at lag 1
All other lags of $Y_t$ or $X_{k,t}$ are differenced
Due to their structure, UECM models do not support irregular lag specifications, and so lag specifications must be integers. The AR lag length must be an integer or None, while the DL lag specification can be an integer or a dictionary of integers. Other options such as trend, seasonal, and causal are identical.
Below we select a model and then using the class method from_ardl to construct the UECM. The parameter estimates prefixed with D. are differences.
End of explanation
"""
ecm_res.ci_summary()
"""
Explanation: Cointegrating Relationships
Because the focus is on the long-run relationship, the results of UECM model fits contains a number of properties that focus on the long-run relationship. These are all prefixed ci_, for cointegrating. ci_summary contains the normalized estimates of the cointegrating relationship and associated estimated values.
End of explanation
"""
_ = ecm_res.ci_resids.plot(title="Cointegrating Error")
"""
Explanation: ci_resids contains the long-run residual, which is the error that drives future changes in $\Delta Y_t$.
End of explanation
"""
ecm = UECM(data.lrm, 2, data[["lry", "ibo", "ide"]], 2, seasonal=True)
seasonal_ecm_res = ecm.fit()
seasonal_ecm_res.summary()
"""
Explanation: Seasonal Dummies
Here we add seasonal terms, which appear to be statistically significant.
End of explanation
"""
seasonal_ecm_res.ci_summary()
"""
Explanation: All deterministic terms are included in the ci_ prefixed terms. Here we see the normalized seasonal effects in the summary.
End of explanation
"""
_ = seasonal_ecm_res.ci_resids.plot(title="Cointegrating Error with Seasonality")
"""
Explanation: The residuals are somewhat more random in appearance.
End of explanation
"""
greene = pd.read_fwf("http://www.stern.nyu.edu/~wgreene/Text/Edition7/TableF5-2.txt")
greene.head()
"""
Explanation: The relationship between Consumption and Growth
Here we look at an example from Greene's Econometric analysis which focuses on the long-run relationship between consumption and growth. We start by downloading the raw data.
Greene, W. H. (2000). Econometric analysis 4th edition. International edition, New Jersey: Prentice Hall, 201-215.
End of explanation
"""
index = pd.to_datetime(
greene.Year.astype("int").astype("str")
+ "Q"
+ greene.qtr.astype("int").astype("str")
)
greene.index = index
greene.index.freq = greene.index.inferred_freq
greene.head()
"""
Explanation: We then transform the index to be a pandas DatetimeIndex so that we can easily use seasonal terms.
End of explanation
"""
greene["c"] = np.log(greene.realcons)
greene["g"] = np.log(greene.realgdp)
"""
Explanation: We defined g as the log of real gdp and c as the log of real consumption.
End of explanation
"""
sel_res = ardl_select_order(
greene.c, 8, greene[["g"]], 8, trend="c", seasonal=True, ic="aic"
)
ardl = sel_res.model
ardl.ardl_order
res = ardl.fit(use_t=True)
res.summary()
"""
Explanation: Lag Length Selection
The selected model contains 5 lags of consumption and 2 of growth (0 and 1). Here we include seasonal terms although these are not significant.
End of explanation
"""
sel_res = ardl_select_order(greene.c, 8, greene[["g"]], 8, trend="c", ic="aic")
uecm = UECM.from_ardl(sel_res.model)
uecm_res = uecm.fit()
uecm_res.summary()
"""
Explanation: from_ardl is a simple way to get the equivalent UECM specification. Here we rerun the selection without the seasonal terms.
End of explanation
"""
uecm_res.ci_summary()
_ = uecm_res.ci_resids.plot(title="Cointegrating Error")
"""
Explanation: We see that for every % increase in consumption, we need a 1.05% increase in gdp. In other words, the saving rate is estimated to be around 5%.
End of explanation
"""
uecm = UECM(greene.c, 2, greene[["g"]], 1, trend="c")
uecm_res = uecm.fit()
uecm_res.summary()
"""
Explanation: Direct Specification of UECM models
UECM can be used to directly specify model lag lengths.
End of explanation
"""
uecm_res.ci_summary()
"""
Explanation: The changes in the lag structure make little difference in the estimated long-run relationship.
End of explanation
"""
ecm = UECM(data.lrm, 3, data[["lry", "ibo", "ide"]], 3, trend="c")
ecm_fit = ecm.fit()
bounds_test = ecm_fit.bounds_test(case=4)
bounds_test
bounds_test.crit_vals
"""
Explanation: Bounds Testing
UECMResults expose the bounds test of Pesaran, Shin, and Smith (2001). This test facilitates testing whether there is a level relationship between a set of variables without identifying which variables are I(1). This test provides two sets of critical and p-values. If the test statistic is below the critical value for the lower bound, then there appears to be no levels relationship irrespective of the order of integration of the $X$ variables. If it is above the upper bound, then there appears to be a levels relationship again, irrespective of the order of integration of the $X$ variables. There are 5 cases covered in the paper that include different combinations of deterministic regressors in the model or the test.
$$\Delta Y_{t}=\delta_{0} + \delta_{1}t + Z_{t-1}\beta + \sum_{j=0}^{P}\Delta X_{t-j}\Gamma + \epsilon_{t}$$
where $Z_{t-1}$ includes both $Y_{t-1}$ and $X_{t-1}$.
The cases determine which deterministic terms are included in the model and which are tested as part of the test.
No deterministic terms
Constant included in both the model and the test
Constant included in the model but not in the test
Constant and trend included in the model, only trend included in the test
Constant and trend included in the model, neither included in the test
Here we run the test on the Danish money demand data set. Here we see the test statistic is above the 95% critical value for both the lower and upper bounds.
Pesaran, M. H., Shin, Y., & Smith, R. J. (2001). Bounds testing approaches to the analysis of level relationships. Journal of applied econometrics, 16(3), 289-326.
End of explanation
"""
ecm = UECM(data.lrm, 3, data[["lry", "ibo", "ide"]], 3, trend="c")
ecm_fit = ecm.fit()
bounds_test = ecm_fit.bounds_test(case=3)
bounds_test
"""
Explanation: Case 3 also rejects the null of no levels relationship.
End of explanation
"""
|
AllenDowney/ModSimPy | notebooks/chap03.ipynb | mit | # Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim library
from modsim import *
# set the random number generator
np.random.seed(7)
"""
Explanation: Modeling and Simulation in Python
Chapter 3
Copyright 2017 Allen Downey
License: Creative Commons Attribution 4.0 International
End of explanation
"""
def step(state, p1, p2):
    """Simulate one minute of time.

    state: bikeshare State object
    p1: probability of an Olin->Wellesley customer arrival
    p2: probability of a Wellesley->Olin customer arrival
    """
    # Check each direction in turn; flip order (p1 first) is preserved so
    # the random sequence matches a seeded run of the original code.
    arrivals = ((p1, bike_to_wellesley), (p2, bike_to_olin))
    for prob, move_bike in arrivals:
        if flip(prob):
            move_bike(state)
def bike_to_wellesley(state):
    """Move one bike from Olin to Wellesley.

    state: bikeshare State object
    """
    # Simultaneous update of both counters: one bike leaves Olin, arrives
    # at Wellesley.
    state.olin, state.wellesley = state.olin - 1, state.wellesley + 1
def bike_to_olin(state):
    """Move one bike from Wellesley to Olin.

    state: bikeshare State object
    """
    # Simultaneous update of both counters: one bike leaves Wellesley,
    # arrives at Olin.
    state.wellesley, state.olin = state.wellesley - 1, state.olin + 1
def decorate_bikeshare():
    """Add a title and label the axes."""
    # Collect the figure labels in one place, then apply them in a single call.
    labels = {
        'title': 'Olin-Wellesley Bikeshare',
        'xlabel': 'Time step (min)',
        'ylabel': 'Number of bikes',
    }
    decorate(**labels)
"""
Explanation: More than one State object
Here's the code from the previous chapter, with two changes:
I've added DocStrings that explain what each function does, and what parameters it takes.
I've added a parameter named state to the functions so they work with whatever State object we give them, instead of always using bikeshare. That makes it possible to work with more than one State object.
End of explanation
"""
def run_simulation(state, p1, p2, num_steps):
    """Simulate the given number of time steps.

    state: State object
    p1: probability of an Olin->Wellesley customer arrival
    p2: probability of a Wellesley->Olin customer arrival
    num_steps: number of time steps
    """
    # Record the number of bikes at Olin after every step, then plot the
    # whole history at the end.
    history = TimeSeries()
    for t in range(num_steps):
        step(state, p1, p2)
        history[t] = state.olin
    plot(history, label='Olin')
"""
Explanation: And here's run_simulation, which is a solution to the exercise at the end of the previous notebook.
End of explanation
"""
bikeshare1 = State(olin=10, wellesley=2)
bikeshare2 = State(olin=2, wellesley=10)
"""
Explanation: Now we can create more than one State object:
End of explanation
"""
bike_to_olin(bikeshare1)
bike_to_wellesley(bikeshare2)
"""
Explanation: Whenever we call a function, we indicate which State object to work with:
End of explanation
"""
bikeshare1
bikeshare2
"""
Explanation: And you can confirm that the different objects are getting updated independently:
End of explanation
"""
bikeshare = State(olin=10, wellesley=2)
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
"""
Explanation: Negative bikes
In the code we have so far, the number of bikes at one of the locations can go negative, and the number of bikes at the other location can exceed the actual number of bikes in the system.
If you run this simulation a few times, it happens often.
End of explanation
"""
def bike_to_wellesley(state):
    """Move one bike from Olin to Wellesley.

    state: bikeshare State object

    If Olin has no bikes, this does nothing (prevents negative counts).
    """
    if state.olin != 0:
        state.olin -= 1
        state.wellesley += 1
def bike_to_olin(state):
    """Move one bike from Wellesley to Olin.

    state: bikeshare State object

    If Wellesley has no bikes, this does nothing (prevents negative counts).
    """
    if state.wellesley != 0:
        state.wellesley -= 1
        state.olin += 1
"""
Explanation: We can fix this problem using the return statement to exit the function early if an update would cause negative bikes.
End of explanation
"""
bikeshare = State(olin=10, wellesley=2)
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
"""
Explanation: Now if you run the simulation again, it should behave.
End of explanation
"""
x = 5
"""
Explanation: Comparison operators
The if statements in the previous section used the comparison operator ==. The other comparison operators are listed in the book.
It is easy to confuse the comparison operator == with the assignment operator =.
Remember that = creates a variable or gives an existing variable a new value.
End of explanation
"""
x == 5
"""
Explanation: Whereas == compares two values and returns True if they are equal.
End of explanation
"""
if x == 5:
print('yes, x is 5')
"""
Explanation: You can use == in an if statement.
End of explanation
"""
# If you remove the # from the if statement and run it, you'll get
# SyntaxError: invalid syntax
#if x = 5:
# print('yes, x is 5')
"""
Explanation: But if you use = in an if statement, you get an error.
End of explanation
"""
bikeshare = State(olin=10, wellesley=2,
olin_empty=0, wellesley_empty=0)
"""
Explanation: Exercise: Add an else clause to the if statement above, and print an appropriate message.
Replace the == operator with one or two of the other comparison operators, and confirm they do what you expect.
Metrics
Now that we have a working simulation, we'll use it to evaluate alternative designs and see how good or bad they are. The metric we'll use is the number of customers who arrive and find no bikes available, which might indicate a design problem.
First we'll make a new State object that creates and initializes additional state variables to keep track of the metrics.
End of explanation
"""
def bike_to_wellesley(state):
    """Move one bike from Olin to Wellesley.

    state: bikeshare State object

    If Olin is empty, records one unhappy customer instead of moving a bike.
    """
    if state.olin != 0:
        state.olin -= 1
        state.wellesley += 1
    else:
        state.olin_empty += 1
def bike_to_olin(state):
    """Move one bike from Wellesley to Olin.

    state: bikeshare State object

    If Wellesley is empty, records one unhappy customer instead of moving a bike.
    """
    if state.wellesley != 0:
        state.wellesley -= 1
        state.olin += 1
    else:
        state.wellesley_empty += 1
"""
Explanation: Next we need versions of bike_to_wellesley and bike_to_olin that update the metrics.
End of explanation
"""
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
"""
Explanation: Now when we run a simulation, it keeps track of unhappy customers.
End of explanation
"""
bikeshare.olin_empty
bikeshare.wellesley_empty
"""
Explanation: After the simulation, we can print the number of unhappy customers at each location.
End of explanation
"""
bikeshare = State(olin=10, wellesley=2,
olin_empty=0, wellesley_empty=0,
clock=0)
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: Exercises
Exercise: As another metric, we might be interested in the time until the first customer arrives and doesn't find a bike. To make that work, we have to add a "clock" to keep track of how many time steps have elapsed:
Create a new State object with an additional state variable, clock, initialized to 0.
Write a modified version of step that adds one to the clock each time it is invoked.
Test your code by running the simulation and check the value of clock at the end.
End of explanation
"""
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: Exercise: Continuing the previous exercise, let's record the time when the first customer arrives and doesn't find a bike.
Create a new State object with an additional state variable, t_first_empty, initialized to -1 as a special value to indicate that it has not been set.
Write a modified version of step that checks whether olin_empty and wellesley_empty are 0. If not, it should set t_first_empty to clock (but only if t_first_empty has not already been set).
Test your code by running the simulation and printing the values of olin_empty, wellesley_empty, and t_first_empty at the end.
End of explanation
"""
|
srippa/nn_deep | assignment1/svm.ipynb | mit | # Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
"""
Explanation: Multiclass Support Vector Machine exercise
Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the assignments page on the course website.
In this exercise you will:
implement a fully-vectorized loss function for the SVM
implement the fully-vectorized expression for its analytic gradient
check your implementation using numerical gradient
use a validation set to tune the learning rate and regularization strength
optimize the loss function with SGD
visualize the final learned weights
End of explanation
"""
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise.
num_training = 49000
num_validation = 1000
num_test = 1000
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
# As a sanity check, print out the shapes of the data
print 'Training data shape: ', X_train.shape
print 'Validation data shape: ', X_val.shape
print 'Test data shape: ', X_test.shape
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print mean_image[:10] # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
# Also, lets transform both data matrices so that each image is a column.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))]).T
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))]).T
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))]).T
print X_train.shape, X_val.shape, X_test.shape
"""
Explanation: CIFAR-10 Data Loading and Preprocessing
End of explanation
"""
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(10, 3073) * 0.0001
loss, grad = svm_loss_naive(W, X_train, y_train, 0.00001)
print 'loss: %f' % (loss, )
"""
Explanation: SVM Classifier
Your code for this section will all be written inside cs231n/classifiers/linear_svm.py.
As you can see, we have prefilled the function compute_loss_naive which uses for loops to evaluate the multiclass SVM loss function.
End of explanation
"""
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_train, y_train, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_train, y_train, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
"""
Explanation: The grad returned from the function above is right now all zero. Derive and implement the gradient for the SVM cost function and implement it inline inside the function svm_loss_naive. You will find it helpful to interleave your new code inside the existing function.
To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
End of explanation
"""
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_train, y_train, 0.00001)
toc = time.time()
print 'Naive loss: %e computed in %fs' % (loss_naive, toc - tic)
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_train, y_train, 0.00001)
toc = time.time()
print 'Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)
# The losses should match but your vectorized implementation should be much faster.
print 'difference: %f' % (loss_naive - loss_vectorized)
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_train, y_train, 0.00001)
toc = time.time()
print 'Naive loss and gradient: computed in %fs' % (toc - tic)
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_train, y_train, 0.00001)
toc = time.time()
print 'Vectorized loss and gradient: computed in %fs' % (toc - tic)
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print 'difference: %f' % difference
"""
Explanation: Inline Question 1:
It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? Hint: the SVM loss function is not strictly speaking differentiable
Your Answer: fill this in.
End of explanation
"""
# Now implement SGD in LinearSVM.train() function and run it with the code below
from cs231n.classifiers import LinearSVM
learning_rates = [1e-7, 5e-5]
regularization_strengths = [5e4, 1e5]
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-5, reg=5e4,
num_iters=1500, verbose=True)
toc = time.time()
print 'That took %fs' % (toc - tic)
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print 'training accuracy: %f' % (np.mean(y_train == y_train_pred), )
y_val_pred = svm.predict(X_val)
print 'validation accuracy: %f' % (np.mean(y_val == y_val_pred), )
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-5]
regularization_strengths = [5e4, 1e5]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
pass
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
sz = [results[x][0]*1500 for x in results] # default size of markers is 20
plt.subplot(1,2,1)
plt.scatter(x_scatter, y_scatter, sz)
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
sz = [results[x][1]*1500 for x in results] # default size of markers is 20
plt.subplot(1,2,2)
plt.scatter(x_scatter, y_scatter, sz)
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print 'linear SVM on raw pixels final test set accuracy: %f' % test_accuracy
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:,:-1] # strip out the bias
w = w.reshape(10, 32, 32, 3)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in xrange(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
"""
Explanation: Stochastic Gradient Descent
We now have vectorized and efficient expressions for the loss, the gradient and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.
End of explanation
"""
|
SunPower/pvfactors | docs/tutorials/PVArray_introduction.ipynb | bsd-3-clause | # Import external libraries
import matplotlib.pyplot as plt
# Settings
%matplotlib inline
"""
Explanation: PV Array geometry introduction
In this section, we will learn how to:
create a 2D PV array geometry with PV rows at identical heights, tilt angles, and with identical widths
plot that PV array
calculate the inter-row direct shading, and get the length of the shadows on the PV rows
understand what timeseries geometries are, including ts_pvrows and ts_ground
Imports and settings
End of explanation
"""
pvarray_parameters = {
'n_pvrows': 4, # number of pv rows
'pvrow_height': 1, # height of pvrows (measured at center / torque tube)
'pvrow_width': 1, # width of pvrows
'axis_azimuth': 0., # azimuth angle of rotation axis
'surface_tilt': 20., # tilt of the pv rows
'surface_azimuth': 90., # azimuth of the pv rows front surface
'solar_zenith': 40., # solar zenith angle
'solar_azimuth': 150., # solar azimuth angle
'gcr': 0.5, # ground coverage ratio
}
"""
Explanation: Prepare PV array parameters
End of explanation
"""
from pvfactors.geometry import OrderedPVArray
pvarray = OrderedPVArray.fit_from_dict_of_scalars(pvarray_parameters)
"""
Explanation: Create a PV array and its shadows
Import the OrderedPVArray class and create a transformed PV array object using the parameters above
End of explanation
"""
# Plot pvarray shapely geometries
f, ax = plt.subplots(figsize=(10, 3))
pvarray.plot_at_idx(0, ax)
plt.show()
"""
Explanation: Plot the PV array.
Note: the index 0 is passed to the plotting method. We're explaining why a little later in this tutorial.
End of explanation
"""
# New configuration with direct shading
pvarray_parameters.update({'surface_tilt': 80., 'solar_zenith': 75., 'solar_azimuth': 90.})
pvarray_parameters
# Create new PV array
pvarray_w_direct_shading = OrderedPVArray.fit_from_dict_of_scalars(pvarray_parameters)
# Plot pvarray shapely geometries
f, ax = plt.subplots(figsize=(10, 3))
pvarray_w_direct_shading.plot_at_idx(0, ax)
plt.show()
"""
Explanation: As we can see in the plot above:
- the blue lines represent the PV rows
- the gray lines represent the shadows cast by the PV rows on the ground from direct light
- the yellow lines represent the ground areas that don't get any direct shading
- there are additional points on the ground that may seem out of place: but they are called "cut points" and are necessary to calculate view factors. For instance, if you take the cut point located between the second and third shadows (counting from the left), it marks the point after which the leftmost PV row's back side is not able to see the ground anymore
Situation with direct shading
We can also create situations where direct shading happens either on the front or back surface of the PV rows.
End of explanation
"""
# Shaded length on first pv row (leftmost)
l = pvarray_w_direct_shading.ts_pvrows[0].front.shaded_length
print("Shaded length on front surface of leftmost PV row: %.2f m" % l)
# Shaded length on last pv row (rightmost)
l = pvarray_w_direct_shading.ts_pvrows[-1].front.shaded_length
print("Shaded length on front surface of rightmost PV row: %.2f m" %l)
"""
Explanation: We can now see on the plot above that some inter-row shading is happening in the PV array.
It is also very easy to obtain the shadow length on the front surface of the shaded PV rows.
End of explanation
"""
front_illum_ts_surface = pvarray_w_direct_shading.ts_pvrows[0].front.list_segments[0].illum.list_ts_surfaces[0]
coords = front_illum_ts_surface.coords
print("Coords: {}".format(coords))
"""
Explanation: As we can see, the rightmost PV row is not shaded at all.
What are timeseries geometries?
It is important to note that the two most important attributes of the PV array object are ts_pvrows and ts_ground. These contain what we call "timeseries geometries", which are objects that represent the geometry of the PV rows and the ground for all timestamps of the simulation.
For instance here, we can look at the coordinates of the front illuminated timeseries surface of the leftmost PV row.
End of explanation
"""
b1 = coords.b1
b2 = coords.b2
print("b1 coords: {}".format(b1))
"""
Explanation: These are the timeseries line coordinates of the surface, and it is made out of two timeseries point coordinates, b1 and b2 ("b" for boundary).
End of explanation
"""
print("x coords of b1: {}".format(b1.x))
print("y coords of b1: {}".format(b1.y))
"""
Explanation: Each timeseries point is also made of x and y timeseries coordinates, which are just numpy arrays.
End of explanation
"""
|
kazzz24/deep-learning | reinforcement/Q-learning-cart-Copy1.ipynb | mit | import gym
import tensorflow as tf
import numpy as np
"""
Explanation: Deep Q-learning
In this notebook, we'll build a neural network that can learn to play games through reinforcement learning. More specifically, we'll use Q-learning to train an agent to play a game called Cart-Pole. In this game, a freely swinging pole is attached to a cart. The cart can move to the left and right, and the goal is to keep the pole upright as long as possible.
We can simulate this game using OpenAI Gym. First, let's check out how OpenAI Gym works. Then, we'll get into training an agent to play the Cart-Pole game.
End of explanation
"""
# Create the Cart-Pole game environment
env = gym.make('MountainCar-v0')
"""
Explanation: Note: Make sure you have OpenAI Gym cloned into the same directory with this notebook. I've included gym as a submodule, so you can run git submodule --init --recursive to pull the contents into the gym repo.
End of explanation
"""
env.reset()
rewards = []
for _ in range(1000):
env.render()
state, reward, done, info = env.step(env.action_space.sample()) # take a random action
rewards.append(reward)
if done:
rewards = []
env.reset()
env.action_space
"""
Explanation: We interact with the simulation through env. To show the simulation running, you can use env.render() to render one frame. Passing in an action as an integer to env.step will generate the next step in the simulation. You can see how many actions are possible from env.action_space and to get a random action you can use env.action_space.sample(). This is general to all Gym games. In the Cart-Pole game, there are two possible actions, moving the cart left or right. So there are two actions we can take, encoded as 0 and 1.
Run the code below to watch the simulation run.
End of explanation
"""
print(rewards[-20:])
"""
Explanation: To shut the window showing the simulation, use env.close().
If you ran the simulation above, we can look at the rewards:
End of explanation
"""
class QNetwork:
    """TensorFlow 1.x graph for a feed-forward Q-value approximator.

    Maps a state vector to one Q-value per discrete action and defines a
    squared TD-error training op on top of it.  All placeholders/ops are
    exposed as attributes (``inputs_``, ``actions_``, ``targetQs_``,
    ``output``, ``loss``, ``opt``) and are fed/fetched by the training
    loop elsewhere in this notebook.

    NOTE(review): the default ``action_size=2`` matches Cart-Pole, but the
    environment created above is MountainCar-v0, which has 3 discrete
    actions -- confirm the instantiation passes ``action_size=3``.
    """
    def __init__(self, learning_rate=0.01, state_size=2,
                 action_size=2, hidden_size=10,
                 name='QNetwork'):
        # state inputs to the Q-network; batch dimension left open (None)
        with tf.variable_scope(name):
            self.inputs_ = tf.placeholder(tf.float32, [None, state_size], name='inputs')

            # One hot encode the actions to later choose the Q-value for the action
            self.actions_ = tf.placeholder(tf.int32, [None], name='actions')
            one_hot_actions = tf.one_hot(self.actions_, action_size)

            # Target Q values for training (one scalar target per sample)
            self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')

            # ReLU hidden layers (fully_connected defaults to ReLU activation)
            self.fc1 = tf.contrib.layers.fully_connected(self.inputs_, hidden_size)
            self.fc2 = tf.contrib.layers.fully_connected(self.fc1, hidden_size)

            # Linear output layer: one raw Q-value per action, no activation
            self.output = tf.contrib.layers.fully_connected(self.fc2, action_size,
                                                            activation_fn=None)

            ### Train with loss (targetQ - Q)^2
            # output has length 2, for two actions. This next line chooses
            # one value from output (per row) according to the one-hot encoded actions.
            self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)

            # mean squared TD error, minimized with Adam
            self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))
            self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
"""
Explanation: The game resets after the pole has fallen past a certain angle. For each frame while the simulation is running, it returns a reward of 1.0. The longer the game runs, the more reward we get. Then, our network's goal is to maximize the reward by keeping the pole vertical. It will do this by moving the cart to the left and the right.
Q-Network
We train our Q-learning agent using the Bellman Equation:
$$
Q(s, a) = r + \gamma \max{Q(s', a')}
$$
where $s$ is a state, $a$ is an action, and $s'$ is the next state from state $s$ and action $a$.
Before we used this equation to learn values for a Q-table. However, for this game there are a huge number of states available. The state has four values: the position and velocity of the cart, and the position and velocity of the pole. These are all real-valued numbers, so ignoring floating point precisions, you practically have infinite states. Instead of using a table then, we'll replace it with a neural network that will approximate the Q-table lookup function.
<img src="assets/deep-q-learning.png" width=450px>
Now, our Q value, $Q(s, a)$ is calculated by passing in a state to the network. The output will be Q-values for each available action, with fully connected hidden layers.
<img src="assets/q-network.png" width=550px>
As I showed before, we can define our targets for training as $\hat{Q}(s,a) = r + \gamma \max{Q(s', a')}$. Then we update the weights by minimizing $(\hat{Q}(s,a) - Q(s,a))^2$.
For this Cart-Pole game, we have four inputs, one for each value in the state, and two outputs, one for each action. To get $\hat{Q}$, we'll first choose an action, then simulate the game using that action. This will get us the next state, $s'$, and the reward. With that, we can calculate $\hat{Q}$ then pass it back into the $Q$ network to run the optimizer and update the weights.
Below is my implementation of the Q-network. I used two fully connected layers with ReLU activations. Two seems to be good enough, three might be better. Feel free to try it out.
End of explanation
"""
from collections import deque
class Memory():
    """Fixed-capacity experience-replay buffer.

    Transitions (state, action, reward, next_state) are kept in a deque so
    that, once the buffer is full, the oldest experiences are discarded
    automatically when new ones arrive.
    """

    def __init__(self, max_size=1000):
        # deque(maxlen=...) silently drops the oldest entry at capacity
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        """Append one experience tuple to the buffer."""
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Return a mini-batch of ``batch_size`` distinct experiences."""
        chosen = np.random.choice(len(self.buffer), size=batch_size,
                                  replace=False)
        return [self.buffer[index] for index in chosen]
"""
Explanation: Experience replay
Reinforcement learning algorithms can have stability issues due to correlations between states. To reduce correlations when training, we can store the agent's experiences and later draw a random mini-batch of those experiences to train on.
Here, we'll create a Memory object that will store our experiences, our transitions $<s, a, r, s'>$. This memory will have a maxmium capacity, so we can keep newer experiences in memory while getting rid of older experiences. Then, we'll sample a random mini-batch of transitions $<s, a, r, s'>$ and train on those.
Below, I've implemented a Memory object. If you're unfamiliar with deque, this is a double-ended queue. You can think of it like a tube open on both sides. You can put objects in either side of the tube. But if it's full, adding anything more will push an object out the other side. This is a great data structure to use for the memory buffer.
End of explanation
"""
# Simulation / training hyperparameters
train_episodes = 1000          # max number of episodes to learn from
max_steps = 200                # max steps in an episode
gamma = 0.99                   # future reward discount

# Exploration parameters (epsilon-greedy schedule, decayed per total step)
explore_start = 1.0            # exploration probability at start
explore_stop = 0.01            # minimum exploration probability
decay_rate = 0.0001            # exponential decay rate for exploration prob

# Network parameters
hidden_size = 64               # number of units in each Q-network hidden layer
learning_rate = 0.0001         # Q-network learning rate

# Memory parameters
memory_size = 10000            # memory capacity
batch_size = 20                # experience mini-batch size
pretrain_length = batch_size   # number experiences to pretrain the memory

tf.reset_default_graph()
# NOTE(review): QNetwork defaults to action_size=2, but the environment
# created above is MountainCar-v0, which has 3 discrete actions -- confirm
# whether action_size=3 should be passed here.
mainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate)
"""
Explanation: Exploration - Exploitation
To learn about the environment and rules of the game, the agent needs to explore by taking random actions. We'll do this by choosing a random action with some probability $\epsilon$ (epsilon). That is, with some probability $\epsilon$ the agent will make a random action and with probability $1 - \epsilon$, the agent will choose an action from $Q(s,a)$. This is called an $\epsilon$-greedy policy.
At first, the agent needs to do a lot of exploring. Later when it has learned more, the agent can favor choosing actions based on what it has learned. This is called exploitation. We'll set it up so the agent is more likely to explore early in training, then more likely to exploit later in training.
Q-Learning training algorithm
Putting all this together, we can list out the algorithm we'll use to train the network. We'll train the network in episodes. One episode is one simulation of the game. For this game, the goal is to keep the pole upright for 195 frames. So we can start a new episode once meeting that goal. The game ends if the pole tilts over too far, or if the cart moves too far the left or right. When a game ends, we'll start a new episode. Now, to train the agent:
Initialize the memory $D$
Initialize the action-value network $Q$ with random weights
For episode = 1, $M$ do
For $t$, $T$ do
With probability $\epsilon$ select a random action $a_t$, otherwise select $a_t = \mathrm{argmax}_a Q(s,a)$
Execute action $a_t$ in simulator and observe reward $r_{t+1}$ and new state $s_{t+1}$
Store transition $<s_t, a_t, r_{t+1}, s_{t+1}>$ in memory $D$
Sample random mini-batch from $D$: $<s_j, a_j, r_j, s'_j>$
Set $\hat{Q}_j = r_j$ if the episode ends at $j+1$, otherwise set $\hat{Q}_j = r_j + \gamma \max_{a'}{Q(s'_j, a')}$
Make a gradient descent step with loss $(\hat{Q}_j - Q(s_j, a_j))^2$
endfor
endfor
Hyperparameters
One of the more difficult aspects of reinforcement learning is the large number of hyperparameters. Not only are we tuning the network, but we're tuning the simulation.
End of explanation
"""
# Initialize the simulation
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
memory = Memory(max_size=memory_size)
# Make a bunch of random actions and store the experiences
for ii in range(pretrain_length):
# Uncomment the line below to watch the simulation
# env.render()
# Make a random action
action = env.action_space.sample()
next_state, reward, done, _ = env.step(action)
if done:
# The simulation fails so no next state
next_state = np.zeros(state.shape)
# Add experience to memory
memory.add((state, action, reward, next_state))
# Start new episode
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
# Add experience to memory
memory.add((state, action, reward, next_state))
state = next_state
"""
Explanation: Populate the experience memory
Here I'm re-initializing the simulation and pre-populating the memory. The agent is taking random actions and storing the transitions in memory. This will help the agent with exploring the game.
End of explanation
"""
# Now train with experiences
#
# Each environment step: pick an action (epsilon-greedy), store the
# transition in replay memory, then train on one random mini-batch.
saver = tf.train.Saver()
rewards_list = []
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())

    step = 0
    # fix: define loss up front so the end-of-episode print cannot raise a
    # NameError if an episode terminates before the first training step has
    # produced a loss value
    loss = 0.0
    for ep in range(1, train_episodes):
        total_reward = 0
        t = 0
        while t < max_steps:
            step += 1
            # Uncomment this next line to watch the training
            #env.render()

            # Explore or Exploit: exploration probability decays with the
            # total number of steps taken so far
            explore_p = explore_stop + (explore_start - explore_stop)*np.exp(-decay_rate*step)
            if explore_p > np.random.rand():
                # Make a random action
                action = env.action_space.sample()
            else:
                # Get action from Q-network (greedy w.r.t. predicted Q-values)
                feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
                Qs = sess.run(mainQN.output, feed_dict=feed)
                action = np.argmax(Qs)

            # Take action, get new state and reward
            next_state, reward, done, _ = env.step(action)

            total_reward += reward

            if done:
                # the episode ends so no next state; the all-zero state acts
                # as a sentinel marking terminal transitions in memory
                next_state = np.zeros(state.shape)
                t = max_steps

                print('Episode: {}'.format(ep),
                      'Total reward: {}'.format(total_reward),
                      'Training loss: {:.4f}'.format(loss),
                      'Explore P: {:.4f}'.format(explore_p))
                rewards_list.append((ep, total_reward))

                # Add experience to memory
                memory.add((state, action, reward, next_state))

                # Start new episode
                env.reset()
                # Take one random step to get the pole and cart moving
                state, reward, done, _ = env.step(env.action_space.sample())

            else:
                # Add experience to memory
                memory.add((state, action, reward, next_state))
                state = next_state
                t += 1

            # Sample mini-batch from memory
            batch = memory.sample(batch_size)
            states = np.array([each[0] for each in batch])
            actions = np.array([each[1] for each in batch])
            rewards = np.array([each[2] for each in batch])
            next_states = np.array([each[3] for each in batch])

            # Train network
            target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states})

            # Set target_Qs to 0 for states where the episode ends.
            # fix: assign the scalar 0 (broadcasts over however many actions
            # the network outputs) instead of the hard-coded 2-tuple (0, 0),
            # which fails for any action space whose size is not 2
            # (MountainCar-v0 has 3 discrete actions).
            episode_ends = (next_states == np.zeros(states[0].shape)).all(axis=1)
            target_Qs[episode_ends] = 0

            # Bellman targets: r + gamma * max_a' Q(s', a')
            targets = rewards + gamma * np.max(target_Qs, axis=1)

            loss, _ = sess.run([mainQN.loss, mainQN.opt],
                                feed_dict={mainQN.inputs_: states,
                                           mainQN.targetQs_: targets,
                                           mainQN.actions_: actions})

    saver.save(sess, "checkpoints/cartpole.ckpt")
"""
Explanation: Training
Below we'll train our agent. If you want to watch it train, uncomment the env.render() line. This is slow because it's rendering the frames slower than the network can train. But, it's cool to watch the agent get better at the game.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
def running_mean(x, N):
    """Simple moving average of `x` over a sliding window of length N.

    Returns an array of len(x) - N + 1 window means, computed via a
    cumulative-sum trick so the whole thing is a single O(len(x)) pass.
    """
    totals = np.cumsum(np.insert(x, 0, 0))
    window_sums = totals[N:] - totals[:-N]
    return window_sums / N
eps, rews = np.array(rewards_list).T
smoothed_rews = running_mean(rews, 10)
plt.plot(eps[-len(smoothed_rews):], smoothed_rews)
plt.plot(eps, rews, color='grey', alpha=0.3)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
"""
Explanation: Visualizing training
Below I'll plot the total rewards for each episode. I'm plotting the rolling average too, in blue.
End of explanation
"""
test_episodes = 10
test_max_steps = 400
env.reset()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
for ep in range(1, test_episodes):
t = 0
while t < test_max_steps:
env.render()
# Get action from Q-network
feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
Qs = sess.run(mainQN.output, feed_dict=feed)
action = np.argmax(Qs)
# Take action, get new state and reward
next_state, reward, done, _ = env.step(action)
if done:
t = test_max_steps
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
else:
state = next_state
t += 1
env.close()
"""
Explanation: Testing
Let's checkout how our trained agent plays the game.
End of explanation
"""
|
atlury/deep-opencl | DL0110EN/2.6.3.multi-target_linear_regression.ipynb | lgpl-3.0 | from torch import nn
import torch
Set the random seed:
torch.manual_seed(1)
"""
Explanation: <div class="alert alert-block alert-info" style="margin-top: 20px">
<a href="http://cocl.us/pytorch_link_top"><img src = "http://cocl.us/Pytorch_top" width = 950, align = "center"></a>
<img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 200, align = "center">
<h1 align=center><font size = 5>Linear Regression with Multiple Outputs </font></h1>
# Table of Contents
In this lab, we will review how to make a prediction for Linear Regression with Multiple Output.
<div class="alert alert-block alert-info" style="margin-top: 20px">
<li><a href="#ref2">Build Custom Modules </a></li>
<br>
<p></p>
Estimated Time Needed: <strong>15 min</strong>
</div>
<hr>
<a id="ref1"></a>
<h2 align=center>Class Linear </h2>
End of explanation
"""
class linear_regression(nn.Module):
    """Multi-output linear model: yhat = x @ W.T + b via a single nn.Linear."""

    def __init__(self, input_size, output_size):
        """Build one linear layer mapping input_size features to output_size targets."""
        super(linear_regression, self).__init__()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        """Return the model prediction for input tensor x."""
        return self.linear(x)
"""
Explanation: Set the random seed:
End of explanation
"""
model=linear_regression(2,2)
"""
Explanation: Create a linear regression object; since our input and output dimensions are both two, we set the parameters accordingly
End of explanation
"""
list(model.parameters())
"""
Explanation: we can use the diagram to represent the model or object
<img src = "https://ibm.box.com/shared/static/icmwnxru7nytlhnq5x486rffea9ncpk7.png" width = 600, align = "center">
we can see the parameters
End of explanation
"""
x=torch.tensor([[1.0,3.0]])
"""
Explanation: we can create a tensor with one row and two columns representing a single sample of data
End of explanation
"""
yhat=model(x)
yhat
"""
Explanation: we can make a prediction
End of explanation
"""
X=torch.tensor([[1.0,1.0],[1.0,2.0],[1.0,3.0]])
"""
Explanation: each row in the following tensor represents a different sample
End of explanation
"""
Yhat=model(X)
Yhat
"""
Explanation: we can make a prediction using multiple samples
End of explanation
"""
|
napjon/krisk | notebooks/themes-colors.ipynb | bsd-3-clause | import krisk.plot as kk
import pandas as pd
# Use this when you want to nbconvert the notebook (used by nbviewer)
from krisk import init_notebook; init_notebook()
df = pd.read_csv('../krisk/tests/data/gapminderDataFiveYear.txt', sep='\t').sample(50)
"""
Explanation: With krisk, you also can customize color and themes.
End of explanation
"""
p = kk.bar(df,'year',c='continent',stacked=True)
p
"""
Explanation: Themes
There are currently six possible themes in krisk, as supported by Echarts. Here I will show you simple chart using each of theme.
For more intutitive about them, please visit http://echarts.baidu.com/download-theme.html
Normal
End of explanation
"""
p.set_theme('vintage')
"""
Explanation: Vintage
End of explanation
"""
p.set_theme('dark')
"""
Explanation: Dark
End of explanation
"""
p.set_theme('macarons')
"""
Explanation: Macarons
End of explanation
"""
p.set_theme('infographic')
"""
Explanation: Infographic
End of explanation
"""
p.set_theme('roma')
"""
Explanation: Roma
End of explanation
"""
p.set_theme('shine')
"""
Explanation: Shine
End of explanation
"""
pallete = ['Navy','#FF0000','rgb(205,92,92)', '#65c3bf','hsl(60, 100%, 87%)']
p.set_color(background='Aqua', palette=pallete)
"""
Explanation: Colors (Palette and Background)
Krisk doesn't have built-in colormaps, but you can feed CSS color codes, hex, or RGB colors manually.
End of explanation
"""
import seaborn as sns
palette_sns1 = sns.color_palette('muted').as_hex()
p.set_color(palette=palette_sns1)
"""
Explanation: You also can using existing palettes provided by visualization libraries you already know. Here I will use libraries like Seaborn, Colorlover, and Bokeh.
Seaborn
End of explanation
"""
palette_sns2 = sns.color_palette('YlGnBu').as_hex()
p.set_color(palette=palette_sns2)
"""
Explanation: Seaborn also nicely integrate colormap from matplotlib
End of explanation
"""
import colorlover as cl
cl2 = cl.to_hsl( cl.scales['3']['div']['RdYlBu'] )
p.set_color(palette=cl2)
"""
Explanation: Colorlover
End of explanation
"""
import bokeh.palettes as bp
import bokeh.colors as bc
p.set_color(background=bc.aliceblue.to_hex(),palette=bp.PuBuGn6)
"""
Explanation: Bokeh
End of explanation
"""
|
raghakot/keras-vis | applications/self_driving/visualize_attention.ipynb | mit | import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
from model import build_model, FRAME_W, FRAME_H
from keras.preprocessing.image import img_to_array
from vis.utils import utils
model = build_model()
model.load_weights('weights.hdf5')
img = utils.load_img('images/left.png', target_size=(FRAME_H, FRAME_W))
plt.imshow(img)
# Convert to BGR, create input with batch_size: 1.
bgr_img = utils.bgr2rgb(img)
img_input = np.expand_dims(img_to_array(bgr_img), axis=0)
pred = model.predict(img_input)[0][0]
print('Predicted {}'.format(pred))
"""
Explanation: Visualizing attention on self driving car
So far we have seen many examples of attention and activation maximization on Dense layers that outputs a probability distribution. What if we have a regression model instead?
In this example, we will use a pretrained self driving car model that predicts the steering angle output. This model is borrowed from https://github.com/experiencor/self-driving-toy-car. Here is the model in action.
<a href="https://www.youtube.com/watch?v=-v6q2dNZTU8" rel="some text"><p align="center"></p></a>
Lets load the model, weights etc and make a prediction.
End of explanation
"""
import matplotlib.cm as cm
from vis.visualization import visualize_saliency, overlay
# Gradient modifiers paired with the behaviour they reveal:
#   None           -> what increases the output (right steering)
#   'negate'       -> what decreases the output (left steering)
#   'small_values' -> what keeps the output unchanged (maintain steering)
titles = ['right steering', 'left steering', 'maintain steering']
modifiers = [None, 'negate', 'small_values']
for i, modifier in enumerate(modifiers):
    heatmap = visualize_saliency(model, layer_idx=-1, filter_indices=0,
                                 seed_input=bgr_img, grad_modifier=modifier)
    plt.figure()
    plt.title(titles[i])
    # Overlay is used to alpha blend heatmap onto img.
    # Map the heatmap through the jet colormap to RGB bytes first.
    jet_heatmap = np.uint8(cm.jet(heatmap)[..., :3] * 255)
    plt.imshow(overlay(img, jet_heatmap, alpha=0.7))
"""
Explanation: Looks good. The negative value is indicative of left steering.
Attention
By default, visualize_saliency and visualize_cam use positive gradients which shows what parts of the image increase the output value. This makes sense for categorical outputs. For regression cases, it is more interesting to see what parts of the image cause the output to:
Increase
Decrease
Maintain
the current predicted value. This is where grad_modifiers shine.
To visualize decrease, we need to consider negative gradients that indicate the decrease. To treat them as positive values (as used by visualization), we need to negate the gradients. This is easily done by using grad_modifier='negate'.
To visualize what is responsible for current output, we need to highlight small gradients (either positive or negative). This can be done by using a grad modifier that performs grads = np.abs(1. / grads) to magnify small positive or negative values. Alternatively, we can use grad_modifier='small_values' which does the same thing.
Lets use this knowledge to visualize the parts of the image that cause the car to increase, decrease, maintain the predicted steering.
End of explanation
"""
from vis.visualization import visualize_cam
for i, modifier in enumerate(modifiers):
heatmap = visualize_cam(model, layer_idx=-1, filter_indices=0,
seed_input=bgr_img, grad_modifier=modifier)
plt.figure()
plt.title(titles[i])
# Overlay is used to alpha blend heatmap onto img.
jet_heatmap = np.uint8(cm.jet(heatmap)[..., :3] * 255)
plt.imshow(overlay(img, jet_heatmap, alpha=0.7))
"""
Explanation: That was anti-climactic. Lets try grad-CAM. We know that vanilla saliency can be noisy.
End of explanation
"""
img = utils.load_img('images/blank.png', target_size=(FRAME_H, FRAME_W))
plt.imshow(img)
# Convert to BGR, create input with batch_size: 1.
bgr_img = utils.bgr2rgb(img)
img_input = np.expand_dims(img_to_array(bgr_img), axis=0)
img_input.shape
pred = model.predict(img_input)[0][0]
print('Predicted {}'.format(pred))
"""
Explanation: This makes sense. In order to turn right, the left part of the lane contributes the most towards it. I am guessing it is attending to the fact that it curves left and so changing it to curve right would make the network increase the steering angle.
The maintain_steering visualization shows that its current decision is mostly due to the object in the right corner. This is an undesirable behavior which visualizations like these can help you uncover.
The left steering case is intuitive as well. Interestingly, the objects in the room in the far right also provide it a cue to turn left. This means that, even without the lane marker, the network will probably turn away from obstacles. Lets put this hypothesis to test. Using my awesome photo editing skills, I will remove the lane marker.
Lets see the predicted output first.
End of explanation
"""
# We want to use grad_modifier='small_values' to see what is responsible for maintaining the current prediction.
heatmap = visualize_cam(model, layer_idx=-1, filter_indices=0,
                        seed_input=bgr_img, grad_modifier='small_values')
# Convert the heatmap to RGB bytes via the jet colormap, then alpha-blend it onto the input image.
jet_heatmap = np.uint8(cm.jet(heatmap)[..., :3] * 255)
plt.imshow(overlay(img, jet_heatmap, alpha=0.7))
"""
Explanation: As predicted, it has a left steering output. Lets find out if those objects have anything to do with it.
End of explanation
"""
|
probml/pyprobml | notebooks/book1/03/prob.ipynb | mit | import os
import time
import numpy as np
np.set_printoptions(precision=3)
import glob
import matplotlib.pyplot as plt
import PIL
import imageio
import sklearn
import scipy.stats as stats
import scipy.optimize
import seaborn as sns
sns.set(style="ticks", color_codes=True)
import pandas as pd
pd.set_option("precision", 2) # 2 decimal places
pd.set_option("display.max_rows", 20)
pd.set_option("display.max_columns", 30)
pd.set_option("display.width", 100) # wide windows
"""
Explanation: <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/prob.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Probability
In this notebook, we illustrate some basic concepts from probability theory using Python code.
End of explanation
"""
def normalize(x):
    """Scale a nonnegative vector so that its entries sum to 1."""
    return x / np.sum(x)


def posterior_covid(observed, prevalence=None, sensitivity=None, specificity=None):
    """Compute p(covid | test result) using Bayes' rule.

    Args:
        observed: 0 for a negative test, 1 for a positive test.
        prevalence: prior probability of having covid; defaults to 0.1.
        sensitivity: p(test=1 | covid), the true positive rate; defaults to 0.875.
        specificity: p(test=0 | no covid), the true negative rate; defaults to 0.975.

    Returns:
        np.ndarray [p(no-covid | observed), p(covid | observed)].
    """
    # Fill in defaults independently. The original code set `specificity`
    # only inside the `sensitivity is None` branch, so passing an explicit
    # sensitivity raised NameError; `specificity` is now also a parameter
    # (backward compatible: omitted arguments behave exactly as before).
    if sensitivity is None:
        sensitivity = 0.875
    if specificity is None:
        specificity = 0.975
    TPR = sensitivity
    FNR = 1 - TPR
    TNR = specificity
    FPR = 1 - TNR
    # likelihood_fn[hidden, obs]: rows index the hidden state
    # (0 = no-covid, 1 = covid), columns the observation (0 = neg, 1 = pos).
    likelihood_fn = np.array([[TNR, FPR], [FNR, TPR]])
    # prior(hidden)
    if prevalence is None:
        prevalence = 0.1
    prior = np.array([1 - prevalence, prevalence])
    # Select the likelihood column for the observed outcome, then normalize
    # prior * likelihood to obtain the posterior over the hidden state.
    likelihood = likelihood_fn[:, observed].T
    posterior = normalize(prior * likelihood)
    return posterior
"""
Explanation: Software libraries
There are several software libraries that implement standard probability distributions, and functions for manipulating them (e.g., sampling, fitting). We list some below.
scipy.stats We illustrate how to use this below.
Tensorflow probability (TFP)
Similar API to scipy.stats.
Distrax JAX version of TFP.
Pytorch distributions library. Similar to TFP.
NumPyro distributions library has a similar interface to PyTorch distributions, but uses JAX as the backend.
In this notebook, we mostly focus on scipy.stats.
Basics of Probability theory
What is probability?
We will not go into mathematical detail, but focus on intuition.
Two main "schools of thought"
Bayesian probability = degree of belief
$p(heads=1)=0.5$ means you think the event that a particular coin will land heads is 50% likely.
Frequentist probability = long run frequencies
$p(heads=1)=0.5$ means that the empirical fraction of times this event will occur across infinitely repeated trials is 50%
In practice, the philosophy does not matter much, since both interpretations must satisfy the same basic axioms
Random variables and their distributions
Let $X$ be a (discrete) random variable (RV) with $K$ possible values $\mathcal{X} = {1,...,K}$.
Let $X=x$ be the event that $X$ has value $x$, for some state $x \in \cal{X}$.
We require $0 \leq p(X=x) \leq 1$
We require
$$\sum_{x \in \cal{X}} p(X=x) = 1$$
Let $p(X) = [p(X=1), …, p(X=K)]$ be the distribution or probability mass function (pmf) for RV $X$.
We can generalize this to continuous random variables, which have an infinite number of possible states, using a probability density function (pdf) which satisfies
$$
\int_{x \in \cal{X}} p(X=x) dx = 1
$$
Conjunction and disjunction of events
The probability of events $X=x$ AND $Y=y$ is denoted $p(X=x \land Y=y)$ or just $p(X=x,Y=y)$.
If two RVs are independent, then
$$p(X, Y) = p(X) * p(Y)$$.
The probability of event $X=x$ OR $Y=y$ is
$$p(X=x \lor Y=y) = p(X=x) + p(Y=y) - p(X=x \land Y=y)$$
For disjoint events (that cannot co-occur), this becomes
$$p(X=x \lor Y=y) = p(X=x) + p(Y=y)$$
Conditional probability, sum rule, product rule, Bayes rule
The conditional probability of Y=y given X=x is defined to be
$$ p(Y=y|X=x) = \frac{p(X=x,Y=y)}{p(X=x)} $$
Hence we derive the product rule
$$
\begin{align}
p(X=x, Y=y) &= p(Y=y|X=x) * p(X=x)\
&= p(X=x|Y=y) * p(Y=y)
\end{align}
$$
If $X$ and $Y$ are independent, then $p(Y|X)=p(Y)$ and $p(X|Y)=p(X)$, so
$p(X,Y)=p(X) p(Y)$.
The marginal probability of $X=x$ is given by the sum rule
$$ p(X=x) = \sum_y p(X=x, Y=y)$$
Hence we derive Bayes' rule
$$
\begin{align}
p(Y=y|X=x) &= p(X=x,Y=y) / p(X=x)\
&=\frac{p(X=x|Y=y) * p(Y=y)}
{\sum_{y'} p(X=x|Y=y) * p(Y=y)}
\end{align}
$$
Bayesian inference
Bayes rule is often used to compute a distribution over possible values of a hidden variable or hypothesis $h \in \cal{H}$ after observing some evidence $Y=y$. We can write this as follows:
$$
\begin{align}
p(H=h|Y=y) &= \frac{p(H=h) p(Y=y|H=h)}{p(Y=y)} \
\text{posterior}(h|y) &= \frac{\text{prior}(h) * \text{likelihood}(y|h)}{\text{marginal-likelihood}(y)}
\end{align}
$$
The prior encodes what we believe about the state before we see any data.
The likelihood is the probability of observing the data given each possible hidden state.
The posterior is our new belief state, after seeing the data.
The marginal likelihood is a normalization constant, independent of the hidden state, so can usually be ignored.
Applying Bayes rule to infer a hidden quantity from one or more observations is called Bayesian inference or posterior inference. (It used to be called inverse probability, since it reasons backwards from effects to causes.)
Example: Bayes rule for COVID diagnosis
Consider estimating if someone has COVID $H=1$ or not $H=0$ on the basis of a PCR test. The test can either return a positive result $Y=1$ or a negative result $Y=0$. The reliability of the test is given by the following observation model.
Using data from https://www.nytimes.com/2020/08/04/science/coronavirus-bayes-statistics-math.html, we set sensitivity to 87.5\% and the specificity to 97.5\%.
We also need to specify the prior probability $p(H=1)$; this is known as the prevalence. This varies over time and place, but let's pick $p(H=1)=0.1$ as a reasonable estimate.
If you test positive:
\begin{align}
p(H=1|Y=1)
&= \frac{p(Y=1|H=1) p(H=1)}
{p(Y=1|H=1) p(H=1) + p(Y=1|H=0) p(H=0)}
= 0.795
\end{align}
If you test negative:
\begin{align}
p(H=1|Y=0)
&= \frac{p(Y=0|H=1) p(H=1)}
{p(Y=0|H=1) p(H=1) + p(Y=0|H=0) p(H=0)}
=0.014
\end{align}
Code to reproduce the above.
End of explanation
"""
print(posterior_covid(1)[1] * 100)
print(posterior_covid(0)[1] * 100)
"""
Explanation: For a prevalence of $p(H=1)=0.1$
End of explanation
"""
print(posterior_covid(1, 0.01)[1] * 100) # positive test
print(posterior_covid(0, 0.01)[1] * 100) # negative test
pop = 100000
infected = 0.01 * pop
sens = 87.5 / 100
spec = 97.5 / 100
FPR = 1 - spec
FNR = 1 - sens
print([FPR, FNR])
true_pos = sens * infected
false_pos = FPR * (pop - infected)
num_pos = true_pos + false_pos
posterior = true_pos / num_pos
print([infected, true_pos, false_pos, num_pos, posterior])
"""
Explanation: For a prevalence of $p(H=1)=0.01$
End of explanation
"""
from scipy.stats import norm
rv = norm(0, 1)  # standard normal
# Plot the pdf and cdf side by side in a single figure.
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
X = np.linspace(-3, 3, 500)
ax[0].plot(X, rv.pdf(X))
ax[0].set_title("Gaussian pdf")
ax[1].plot(X, rv.cdf(X))
ax[1].set_title("Gaussian cdf")
plt.show()
# Same two curves again, each in its own standalone figure.
plt.figure()
plt.plot(X, rv.pdf(X))
plt.title("Gaussian pdf")
plt.figure()
plt.plot(X, rv.cdf(X))
plt.title("Gaussian cdf")
plt.show()
# Samples
np.random.seed(42)
mu = 1.1
sigma = 0.1
dist = norm(loc=mu, scale=sigma)  # create "frozen" distribution
N = 10
x = dist.rvs(size=N)  # draw N random samples
print(x.shape)
print(x)
# Re-seeding reproduces the same draws; positional args are (loc, scale).
np.random.seed(42)
x2 = norm(mu, sigma).rvs(size=N)
assert np.allclose(x, x2)
# pdf, cdf, inverse cdf
logprob = dist.logpdf(x)  # evaluate log probability of each sample
print(logprob.shape)
p = dist.cdf(x)
x3 = dist.ppf(p)  # inverse CDF (percent-point function) recovers x from p
assert np.allclose(x, x3)
"""
Explanation: Univariate distributions
Univariate Gaussian (normal) <a class="anchor" id="scipy-unigauss"></a>
End of explanation
"""
from scipy.stats import gamma
x = np.linspace(0, 7, 100)
b = 1  # rate parameter; scipy parameterizes the gamma by scale = 1/b
plt.figure()
for a in [1, 1.5, 2]:
    # pdf of Gamma(shape=a, rate=b) evaluated on the grid
    y = gamma.pdf(x, a, scale=1 / b, loc=0)
    plt.plot(x, y)
plt.legend(["a=%.1f, b=1" % a for a in [1, 1.5, 2]])
plt.title("Gamma(a,b) distributions")
# save_fig('gammaDistb1.pdf')
plt.show()
"""
Explanation: Gamma distribution <a class="anchor" id="scipy-gamma"></a>
End of explanation
"""
import collections
import re
import urllib.request  # `import urllib` alone does not load the `request` submodule

url = "https://raw.githubusercontent.com/probml/pyprobml/master/data/timemachine.txt"
# Renamed from `bytes` to avoid shadowing the builtin type.
raw_bytes = urllib.request.urlopen(url).read()
string = raw_bytes.decode("utf-8")
words = string.split()
print(words[:10])
# Lowercase and replace non-letter characters with a space, then strip:
# without the strip, punctuation leaves trailing spaces so that e.g.
# "time" and "time " would be counted as distinct words downstream.
words = [re.sub("[^A-Za-z]+", " ", w.lower()).strip() for w in words]
print(words[:10])
# Convert sequence of words into sequence of n-grams for different n
# Unigrams
wseq = words  # [tk for st in raw_dataset for tk in st]
print("First 10 unigrams\n", wseq[:10])
# Bigrams: pairs of adjacent words
word_pairs = [pair for pair in zip(wseq[:-1], wseq[1:])]
print("First 10 bigrams\n", word_pairs[:10])
# Trigrams: triples of adjacent words
word_triples = [triple for triple in zip(wseq[:-2], wseq[1:-1], wseq[2:])]
print("First 10 trigrams\n", word_triples[:10])
# ngram statistics. Counter.most_common() sorts by decreasing frequency,
# so list position in the count lists corresponds to rank.
counter = collections.Counter(wseq)
counter_pairs = collections.Counter(word_pairs)
counter_triples = collections.Counter(word_triples)
wordcounts = [count for _, count in counter.most_common()]
bigramcounts = [count for _, count in counter_pairs.most_common()]
triplecounts = [count for _, count in counter_triples.most_common()]
print("Most common unigrams\n", counter.most_common(10))
print("Most common bigrams\n", counter_pairs.most_common(10))
print("Most common trigrams\n", counter_triples.most_common(10))
# Word frequency is linear on log-log scale
plt.figure()
plt.loglog(wordcounts, label="word counts")
plt.ylabel("log frequency")
plt.xlabel("log rank")
# Prediction from Zipf's law, using manually chosen parameters.
# We omit the first 'skip' words, which don't fit the prediction well.
skip = 10.0
x = np.arange(skip, len(wordcounts))
N = np.sum(wordcounts)
kappa = 0.1
a = -1  # Zipf exponent: frequency proportional to 1/rank
y = kappa * np.power(x, a) * N  # predicted frequency for word with rank x
plt.loglog(x, y, label="linear prediction")
plt.legend()
plt.show()
# The number of unique n-grams is smaller for larger n.
# But n-gram statistics also exhibit a power law.
plt.figure()
plt.loglog(wordcounts, label="word counts")
plt.loglog(bigramcounts, label="bigram counts")
plt.loglog(triplecounts, label="triple counts")
plt.legend()
plt.show()
"""
Explanation: Zipf's law <a class="anchor" id="zipf"></a>
In this section, we study the empirical word frequencies derived from H. G. Wells' book The time machine.
Our code is based on https://github.com/d2l-ai/d2l-en/blob/master/chapter_recurrent-neural-networks/lang-model.md
End of explanation
"""
from scipy.stats import multivariate_normal as mvn
# Sample from a random 5-dimensional Gaussian.
D = 5
np.random.seed(42)
mu = np.random.randn(D)
A = np.random.randn(D, D)
Sigma = np.dot(A, A.T)  # A @ A.T is symmetric PSD, hence a valid covariance
dist = mvn(mu, Sigma)
X = dist.rvs(size=10)
print(X.shape)
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
# 2d examples: full, diagonal and spherical (isotropic) covariance matrices.
names = ["Full", "Diag", "Spherical"]
mu = [0, 0]
Covs = {"Full": [[2, 1.8], [1.8, 2]], "Diag": [[1, 0], [0, 3]], "Spherical": [[1, 0], [0, 1]]}
N = 100
points = np.linspace(-5, 5, N)
X, Y = np.meshgrid(points, points)
xs = X.reshape(-1)
ys = Y.reshape(-1)
grid = np.vstack([xs, ys]).T  # N^2 * 2
fig = plt.figure(figsize=(10, 7))
fig.subplots_adjust(hspace=0.5, wspace=0.1)
fig_counter = 1
for i in range(len(Covs)):
    name = names[i]
    Sigma = Covs[name]
    # Evaluate the density on the flattened grid, reshape back to the mesh.
    ps = mvn(mu, Sigma).pdf(grid)
    P = ps.reshape((N, N))
    # Left column: contour plot of the density.
    ax = fig.add_subplot(3, 2, fig_counter)
    ax.contour(X, Y, P)
    ax.axis("equal")  # make circles look circular
    ax.set_title(name)
    fig_counter = fig_counter + 1
    # Right column: the same density rendered as a 3d surface.
    ax = fig.add_subplot(3, 2, fig_counter, projection="3d")
    ax.plot_surface(X, Y, P, rstride=2, cstride=2)
    ax.set_title(name)
    fig_counter = fig_counter + 1
plt.show()
"""
Explanation: Multivariate Gaussian (normal) <a class="anchor" id="scipy-multigauss"></a>
End of explanation
"""
# Grid of bivariate Gaussians over two x2-standard-deviations and five
# correlation coefficients, holding sigma_x1 fixed.
sigma_x1 = 1
sigmas_x2 = [1, 2]
rhos = [-0.90, -0.5, 0, 0.5, 0.90]
# Mesh of (x1, x2) points at which the pdf is evaluated.
k, l = np.mgrid[-5:5:0.1, -5:5:0.1]
pos = np.empty(k.shape + (2,))
pos[:, :, 0] = k
pos[:, :, 1] = l
f, ax = plt.subplots(len(sigmas_x2), len(rhos), sharex=True, sharey=True, figsize=(12, 6), constrained_layout=True)
for i in range(2):
    for j in range(5):
        sigma_x2 = sigmas_x2[i]
        rho = rhos[j]
        # Covariance from the standard deviations and correlation:
        # off-diagonal entry is sigma_x1 * sigma_x2 * rho.
        cov = [[sigma_x1**2, sigma_x1 * sigma_x2 * rho], [sigma_x1 * sigma_x2 * rho, sigma_x2**2]]
        rv = stats.multivariate_normal([0, 0], cov)
        ax[i, j].contour(k, l, rv.pdf(pos))
        ax[i, j].set_xlim(-8, 8)
        ax[i, j].set_ylim(-8, 8)
        ax[i, j].set_yticks([-5, 0, 5])
        # Invisible point (alpha=0) used only to place the parameter label
        # inside each panel via the legend.
        ax[i, j].plot(0, 0, label=f"$\\sigma_{{x2}}$ = {sigma_x2:3.2f}\n$\\rho$ = {rho:3.2f}", alpha=0)
        ax[i, j].legend()
f.text(0.5, -0.05, "x_1", ha="center", fontsize=18)
f.text(-0.05, 0.5, "x_2", va="center", fontsize=18, rotation=0)
"""
Explanation: Illustrate correlation coefficient.
Code is from Bayesian Analysis with Python, ch. 3
End of explanation
"""
|
environmentalscience/essm | docs/examples/examples_numerics.ipynb | gpl-2.0 | from IPython.display import display
from sympy import init_printing, latex
init_printing()
from sympy.printing import StrPrinter
StrPrinter._print_Quantity = lambda self, expr: str(expr.abbrev) # displays short units (m instead of meter)
%run -i 'test_equation_definitions.py'
"""
Explanation: Use examples for numerical calculations
This jupyter notebook can be found at:
https://github.com/environmentalscience/essm/blob/master/docs/examples/examples_numerics.ipynb
Below, we will import variable and equation defintions that were previously exported from api_features.ipynb
by running the file test_equation_definitions.py:
End of explanation
"""
for eq in Equation.__registry__.keys():
print(eq.definition.name + ': ' + str(eq))
"""
Explanation: Numerical evaluations
See here for detailed instructions on how to turn sympy expressions into code: https://docs.sympy.org/latest/modules/codegen.html
We will first list all equations defined in this worksheet:
End of explanation
"""
def print_dict(vdict, list_vars=None):
    """Print the name, value and unit of each variable in vdict.

    If list_vars is given (and non-empty), only those variables are shown;
    entries whose value is None are skipped. A unit equal to 1 means the
    quantity is dimensionless, so no unit is printed for it.
    """
    selected = list_vars if list_vars else vdict.keys()
    for var in selected:
        unit = var.definition.unit
        if unit == 1:
            unit = ''
        value = vdict[var]
        if value is not None:
            print('{0}: {1} {2}'.format(var.name, str(value), str(unit)))
vdict = Variable.__defaults__.copy()
print_dict(vdict)
"""
Explanation: Substitution of equations and values into equations
The easiest way is to define a dictionary with all variables we want to substitute as keys. We start with the default variables and then add more. First, however, we will define a function to display the contents of a dictionary:
End of explanation
"""
from essm.variables.utils import subs_eq
subs_eq(eq_Le, [eq_alphaa, eq_Dva])
"""
Explanation: We can substitute a range of equations into each other by using the custom function subs_eq:
End of explanation
"""
vdict[T_a] = 300.
subs_eq(eq_Le, [eq_alphaa, eq_Dva], vdict)
"""
Explanation: We can also use subs_eq to substitute equations into each other and a dictionary with values. We will first add an entry for T_a into the dictionary and then substitute:
End of explanation
"""
#import theano
from sympy.printing.theanocode import theano_function
import numpy as np
"""
Explanation: Evaluation of equations for long lists of variable sets
Substitution of variables into equations takes a lot of time if they need to be evaluated for a large number of variables. We can use theano to speed this up:
End of explanation
"""
npoints = 10000
xmin = 290.
xmax = 310.
Tvals = np.arange(xmin, xmax, (xmax - xmin)/npoints)
xmin = 0.1
xmax = 0.5
nvals = np.arange(xmin, xmax, (xmax-xmin)/npoints)
%%time
# looping
expr = eq_ideal_gas_law.rhs.subs(Variable.__defaults__)
resvals0 = []
for i in range(len(Tvals)):
resvals0.append(expr.subs({T_g: Tvals[i], n_g: nvals[i]}))
%%time
# Using theano
f1 = theano_function([T_g, n_g], [eq_ideal_gas_law.rhs.subs(Variable.__defaults__)], dims={T_g:1, n_g:1})
resvals1 = f1(Tvals,nvals)
list(resvals0) == list(resvals1)
"""
Explanation: We will now create two long lists of values representing T_g and n_g respectively and show how long it takes to compute ideal gas law values.
End of explanation
"""
from sympy import nsolve
vdict = Variable.__defaults__.copy()
vdict[Pr] = 0.71
vdict[Re_c] = 3000.
vdict[Nu] = 1000.
expr = eq_Nu_forced_all.subs(vdict)
nsolve(expr, 1000.)
"""
Explanation: Both approaches give identical results, but theano_function makes it a lot faster.
Numerical solution
Some equations cannot be solved analytically for a given variable, e.g. eq_Nu_forced_all cannot be solved analytically for Re if Nu is given, so we can use numerical solvers instead:
End of explanation
"""
npoints = 100
xmin = 1000.
xmax = 1200.
Nuvals = np.arange(xmin, xmax, (xmax - xmin)/npoints)
%%time
# Solving for a range of Nu values
vdict = Variable.__defaults__.copy()
vdict[Pr] = 0.71
vdict[Re_c] = 3000.
resvals = []
for Nu1 in Nuvals:
vdict[Nu] = Nu1
resvals.append(nsolve(eq_Nu_forced_all.subs(vdict), 1000.))
"""
Explanation: Now applying to a long list of Nu-values:
End of explanation
"""
import scipy.optimize as sciopt
vdict = Variable.__defaults__.copy()
vdict[Pr] = 0.71
vdict[Re_c] = 3000.
expr = eq_Nu_forced_all.subs(vdict)
expr1 = expr.rhs - expr.lhs
fun_tf = theano_function([Re, Nu], [expr1], dims={Nu:1, Re:1})
x0vals = np.full(Nuvals.shape, fill_value=2000.) # array of same shape as Nuvals, with initial guess
%%time
# Solving for a range of Nu values
resvals1 = sciopt.fsolve(fun_tf, args=Nuvals, x0=x0vals)
np.mean(abs((resvals - resvals1)/resvals))
"""
Explanation: We will now again use a theano function to make it faster. First we import optimize from scipy and prepare the theano_function:
End of explanation
"""
npoints = 1000
xmin = 1000.
xmax = 1200.
Nuvals = np.arange(xmin, xmax, (xmax - xmin)/npoints)
x0vals = np.full(Nuvals.shape, fill_value=2000.)
%%time
# Solving for a range of Nu values
resvals1 = sciopt.fsolve(fun_tf, args=Nuvals, x0=x0vals)
"""
Explanation: Using theano and scipy makes it 2 orders of magnitude faster and the results are different only by 10$^{-10}$%!
Note, however, that scipy gets slowed down for large arrays, so it is more efficient to re-run it repeatedly with subsections of the array:
End of explanation
"""
# Solving for a range of Nu values
imax = len(Nuvals)
i0 = 0
idiff = 100
i1 = i0
resvals2 = []
while i1 < imax - 1:
i0 = i1 # note that resvals[0:2] + resvals[2:4] = resvals[0:4]
i1 = min(i0+idiff, imax)
resvals0 = Nuvals[i0:i1]
resvals2 = np.append(resvals2,resvals0)
print(list(resvals2) == list(Nuvals))
"""
Explanation: We will now test that we can process Nuvals bit by bit and re-create it consistently:
End of explanation
"""
%%time
# Solving for a range of Nu values
imax = len(Nuvals)
i0 = 0
idiff = 100
i1 = i0
resvals2 = []
while i1 < imax - 1:
i0 = i1 # note that resvals[0:2] + resvals[2:4] = resvals[0:4]
i1 = min(i0+idiff, imax)
resvals0 = sciopt.fsolve(fun_tf, args=Nuvals[i0:i1], x0=x0vals[i0:i1])
resvals2 = np.append(resvals2,resvals0)
np.mean(abs((resvals1 - resvals2)/resvals1))
"""
Explanation: Now we will run fsolve for portions of Nuvals bit by bit:
End of explanation
"""
from sympy.utilities.autowrap import autowrap
from sympy import symbols
x, y, z = symbols('x y z')
expr = ((x - y + z)**(13)).expand()
autowrap_func = autowrap(expr)
%%time
autowrap_func(1, 4, 2)
%%time
expr.subs({x:1, y:4, z:2})
"""
Explanation: It is strange that resvals1 and resvals2 are different at all, but anyway, it is clear that slicing the data in relatively small portions is important to keep scipy.optimize.fsolve time-efficient.
Generate code from sympy expressions and execute
Need to install gfortran system-wide first!
End of explanation
"""
from sympy.utilities.autowrap import binary_function
f = binary_function('f', expr)
%%time
f(x,y,z).evalf(2, subs={x:1, y:4, z:2})
"""
Explanation: Use of autowrap made the calculation 3 orders of magnitude faster than substitution of values into the original expression!
Another way is to use binary_function:
End of explanation
"""
|
bokeh/bokeh | examples/howto/server_embed/notebook_embed.ipynb | bsd-3-clause | import yaml
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure
from bokeh.themes import Theme
from bokeh.io import show, output_notebook
from bokeh.sampledata.sea_surface_temperature import sea_surface_temperature
output_notebook()
"""
Explanation: Embedding a Bokeh server in a Notebook
This notebook shows how a Bokeh server application can be embedded inside a Jupyter notebook.
End of explanation
"""
def bkapp(doc):
    """Bokeh server app: sea-surface temperature plot with a smoothing slider."""
    df = sea_surface_temperature.copy()
    source = ColumnDataSource(data=df)
    plot = figure(x_axis_type='datetime', y_range=(0, 25),
                  y_axis_label='Temperature (Celsius)',
                  title="Sea Surface Temperature at 43.18, -70.43")
    plot.line('time', 'temperature', source=source)
    def callback(attr, old, new):
        # Slider value 0 means "no smoothing"; otherwise replace the raw
        # series with an N-day rolling mean.
        if new == 0:
            data = df
        else:
            data = df.rolling('{0}D'.format(new)).mean()
        source.data = ColumnDataSource.from_df(data)
    slider = Slider(start=0, end=30, value=0, step=1, title="Smoothing by N Days")
    slider.on_change('value', callback)
    doc.add_root(column(slider, plot))
    # Styling is supplied as a YAML theme document applied to the whole doc.
    doc.theme = Theme(json=yaml.load("""
        attrs:
            Figure:
                background_fill_color: "#DDDDDD"
                outline_line_color: white
                toolbar_location: above
                height: 500
                width: 800
            Grid:
                grid_line_dash: [6, 4]
                grid_line_color: white
    """, Loader=yaml.FullLoader))
"""
Explanation: There are various application handlers that can be used to build up Bokeh documents. For example, there is a ScriptHandler that uses the code from a .py file to produce Bokeh documents. This is the handler that is used when we run bokeh serve app.py. In the notebook we can use a function to define a Bokeh application.
Here is the function bkapp(doc) that defines our app:
End of explanation
"""
show(bkapp) # notebook_url="http://localhost:8888"
"""
Explanation: Now we can display our application using show, which will automatically create an Application that wraps bkapp using FunctionHandler. The end result is that the Bokeh server will call bkapp to build new documents for every new sessions that is opened.
Note: If the current notebook is not displayed at the default URL, you must update the notebook_url parameter in the comment below to match, and pass it to show.
End of explanation
"""
|
GoogleCloudPlatform/asl-ml-immersion | notebooks/building_production_ml_systems/labs/4b_streaming_data_inference.ipynb | apache-2.0 | import os
import shutil
import googleapiclient.discovery
import numpy as np
import tensorflow as tf
from google import api_core
from google.api_core.client_options import ClientOptions
from google.cloud import bigquery
from matplotlib import pyplot as plt
from tensorflow import keras
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Dense, DenseFeatures
from tensorflow.keras.models import Sequential
print(tf.__version__)
# Change below if necessary
PROJECT = !gcloud config get-value project # noqa: E999
PROJECT = PROJECT[0]
BUCKET = PROJECT
REGION = "us-central1"
%env PROJECT=$PROJECT
%env BUCKET=$BUCKET
%env REGION=$REGION
%%bash
gcloud config set project $PROJECT
gcloud config set ai_platform/region $REGION
"""
Explanation: Working with Streaming Data
Learning Objectives
1. Learn how to process real-time data for ML models using Cloud Dataflow
2. Learn how to serve online predictions using real-time data
Introduction
It can be useful to leverage real time data in a machine learning model when making a prediction. However, doing so requires setting up a streaming data pipeline which can be non-trivial.
Typically you will have the following:
- A series of IoT devices generating and sending data from the field in real-time (in our case these are the taxis)
- A messaging bus to that receives and temporarily stores the IoT data (in our case this is Cloud Pub/Sub)
- A streaming processing service that subscribes to the messaging bus, windows the messages and performs data transformations on each window (in our case this is Cloud Dataflow)
- A persistent store to keep the processed data (in our case this is BigQuery)
These steps happen continuously and in real-time, and are illustrated by the blue arrows in the diagram below.
Once this streaming data pipeline is established, we need to modify our model serving to leverage it. This simply means adding a call to the persistent store (BigQuery) to fetch the latest real-time data when a prediction request comes in. This flow is illustrated by the red arrows in the diagram below.
<img src='../assets/taxi_streaming_data.png' width='80%'>
In this lab we will address how to process real-time data for machine learning models. We will use the same data as our previous 'taxifare' labs, but with the addition of trips_last_5min data as an additional feature. This is our proxy for real-time traffic.
End of explanation
"""
bq = bigquery.Client()
dataset = bigquery.Dataset(bq.dataset("taxifare"))
try:
bq.create_dataset(dataset) # will fail if dataset already exists
print("Dataset created.")
except api_core.exceptions.Conflict:
print("Dataset already exists.")
"""
Explanation: Re-train our model with trips_last_5min feature
In this lab, we want to show how to process real-time data for training and prediction. So, we need to retrain our previous model with this additional feature. Go through the notebook 4a_streaming_data_training.ipynb. Open and run the notebook to train and save a model. This notebook is very similar to what we did in the Introduction to Tensorflow module but note the added feature for trips_last_5min in the model and the dataset.
Simulate Real Time Taxi Data
Since we don’t actually have real-time taxi data we will synthesize it using a simple python script. The script publishes events to Google Cloud Pub/Sub.
Inspect the iot_devices.py script in the taxicab_traffic folder. It is configured to send about 2,000 trip messages every five minutes with some randomness in the frequency to mimic traffic fluctuations. These numbers come from looking at the historical average of taxi ride frequency in BigQuery.
In production this script would be replaced with actual taxis with IoT devices sending trip data to Cloud Pub/Sub.
To execute the iot_devices.py script, launch a terminal and navigate to the asl-ml-immersion/notebooks/building_production_ml_systems/labs directory. Then run the following two commands.
bash
PROJECT_ID=$(gcloud config get-value project)
python3 ./taxicab_traffic/iot_devices.py --project=$PROJECT_ID
You will see new messages being published every 5 seconds. Keep this terminal open so it continues to publish events to the Pub/Sub topic. If you open Pub/Sub in your Google Cloud Console, you should be able to see a topic called taxi_rides.
Create a BigQuery table to collect the processed data
In the next section, we will create a dataflow pipeline to write processed taxifare data to a BigQuery Table, however that table does not yet exist. Execute the following commands to create a BigQuery dataset called taxifare and a table within that dataset called traffic_realtime.
End of explanation
"""
dataset = bigquery.Dataset(bq.dataset("taxifare"))
table_ref = dataset.table("traffic_realtime")
SCHEMA = [
bigquery.SchemaField("trips_last_5min", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("time", "TIMESTAMP", mode="REQUIRED"),
]
table = bigquery.Table(table_ref, schema=SCHEMA)
try:
bq.create_table(table)
print("Table created.")
except api_core.exceptions.Conflict:
print("Table already exists.")
"""
Explanation: Next, we create a table called traffic_realtime and set up the schema.
End of explanation
"""
%%bigquery
SELECT
*
FROM
`taxifare.traffic_realtime`
ORDER BY
time DESC
LIMIT 10
"""
Explanation: Launch Streaming Dataflow Pipeline
Now that we have our taxi data being pushed to Pub/Sub, and our BigQuery table set up, let’s consume the Pub/Sub data using a streaming DataFlow pipeline.
The pipeline is defined in ./taxicab_traffic/streaming_count.py. Open that file and inspect it.
There are 5 transformations being applied:
- Read from PubSub
- Window the messages
- Count number of messages in the window
- Format the count for BigQuery
- Write results to BigQuery
TODO: Open the file ./taxicab_traffic/streaming_count.py and find the TODO there. Specify a sliding window that is 5 minutes long, and gets recalculated every 15 seconds. Hint: Reference the beam programming guide for guidance. To check your answer reference the solution.
For the second transform, we specify a sliding window that is 5 minutes long, and recalculate values every 15 seconds.
In a new terminal, launch the dataflow pipeline using the command below. You can change the BUCKET variable, if necessary. Here it is assumed to be your PROJECT_ID.
bash
PROJECT_ID=$(gcloud config get-value project)
REGION=$(gcloud config get-value ai_platform/region)
BUCKET=$PROJECT_ID # change as necessary
python3 ./taxicab_traffic/streaming_count.py \
--input_topic taxi_rides \
--runner=DataflowRunner \
--project=$PROJECT_ID \
--region=$REGION \
--temp_location=gs://$BUCKET/dataflow_streaming
Once you've submitted the command above you can examine the progress of that job in the Dataflow section of Cloud console.
Explore the data in the table
After a few moments, you should also see new data written to your BigQuery table as well.
Re-run the query periodically to observe new data streaming in! You should see a new row every 15 seconds.
End of explanation
"""
# TODO 2a. Write a function to take most recent entry in `traffic_realtime`
# table and add it to instance.
def add_traffic_last_5min(instance):
    """Add the latest real-time traffic feature to a prediction instance.

    Queries the `taxifare.traffic_realtime` BigQuery table for the most
    recent row (largest `time` value) and copies its `trips_last_5min`
    count into `instance` under the key "traffic_last_5min".

    Args:
        instance: dict of feature name -> value; mutated in place.

    Returns:
        The same dict, with "traffic_last_5min" added.
    """
    bq = bigquery.Client()
    # Most recent entry only: order by timestamp descending, take one row.
    query_string = """
    SELECT
      trips_last_5min
    FROM
      `taxifare.traffic_realtime`
    ORDER BY
      time DESC
    LIMIT 1
    """
    trips = bq.query(query_string).to_dataframe()["trips_last_5min"][0]
    # Cast to a plain int so the instance stays JSON-serializable.
    instance["traffic_last_5min"] = int(trips)
    return instance
"""
Explanation: Make predictions from the new data
In the rest of the lab, we'll referece the model we trained and deployed from the previous labs, so make sure you have run the code in the 4a_streaming_data_training.ipynb notebook.
The add_traffic_last_5min function below will query the traffic_realtime table to find the most recent traffic information and add that feature to our instance for prediction.
Exercise. Complete the code in the function below. Write a SQL query that will return the most recent entry in traffic_realtime and add it to the instance.
End of explanation
"""
add_traffic_last_5min(
instance={
"dayofweek": 4,
"hourofday": 13,
"pickup_longitude": -73.99,
"pickup_latitude": 40.758,
"dropoff_latitude": 41.742,
"dropoff_longitude": -73.07,
}
)
"""
Explanation: The traffic_realtime table is updated in realtime using Cloud Pub/Sub and Dataflow so, if you run the cell below periodically, you should see the traffic_last_5min feature added to the instance and change over time.
End of explanation
"""
# TODO 2b. Write code to call prediction on instance using realtime traffic info.
# Hint: Look at the "Serving online predictions" section of this page https://cloud.google.com/ml-engine/docs/tensorflow/custom-prediction-routine-keras
MODEL_NAME = "taxifare"
VERSION_NAME = "traffic"

service = googleapiclient.discovery.build("ml", "v1", cache_discovery=False)
name = "projects/{}/models/{}/versions/{}".format(
    PROJECT, MODEL_NAME, VERSION_NAME
)

# Enrich a sample ride with the latest real-time traffic feature, then
# request an online prediction for it from the deployed model version.
instance = add_traffic_last_5min(
    instance={
        "dayofweek": 4,
        "hourofday": 13,
        "pickup_longitude": -73.99,
        "pickup_latitude": 40.758,
        "dropoff_latitude": 41.742,
        "dropoff_longitude": -73.07,
    }
)
response = (
    service.projects()
    .predict(name=name, body={"instances": [instance]})
    .execute()
)

if "error" in response:
    raise RuntimeError(response["error"])
else:
    # Predicted fare for the single instance we sent.
    print(response["predictions"][0]["output_1"][0])
"""
Explanation: Finally, we'll use the python api to call predictions on an instance, using the realtime traffic information in our prediction. Just as above, you should notice that our resulting predicitons change with time as our realtime traffic information changes as well.
Exercise. Complete the code below to call prediction on an instance incorporating realtime traffic info. You should
- use the function add_traffic_last_5min to add the most recent realtime traffic data to the prediction instance
- call prediction on your model for this realtime instance and save the result as a variable called response
- parse the json of response to print the predicted taxifare cost
End of explanation
"""
|
broundy/udacity | nanodegrees/deep_learning_foundations/unit_1/project_1/dlnd-your-first-neural-network.ipynb | unlicense | %matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
End of explanation
"""
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
"""
Explanation: Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
End of explanation
"""
rides[:24*10].plot(x='dteday', y='cnt')
"""
Explanation: Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
End of explanation
"""
# One-hot encode each categorical field and append the dummy columns to `rides`.
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)

# Drop the original categorical columns (now represented by dummies) plus
# fields that are not used as predictors.
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
"""
Explanation: Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().
End of explanation
"""
# Standardize each continuous feature to zero mean / unit standard deviation.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
    mean, std = data[each].mean(), data[each].std()
    scaled_features[each] = [mean, std]  # kept so predictions can be un-scaled
    data.loc[:, each] = (data[each] - mean)/std
"""
Explanation: Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
End of explanation
"""
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
"""
Explanation: Splitting the data into training, testing, and validation sets
We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
End of explanation
"""
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
"""
Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
End of explanation
"""
class NeuralNetwork(object):
    """Two-layer network: sigmoid hidden layer, linear (identity) output unit.

    Trained one record at a time with plain gradient descent; intended for
    the bike-ridership regression task.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Layer sizes.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Weight matrices are shaped (to_layer, from_layer) and drawn from
        # N(0, n**-0.5) where n is the destination layer size.
        self.weights_input_to_hidden = np.random.normal(
            0.0, self.hidden_nodes ** -0.5,
            (self.hidden_nodes, self.input_nodes))
        self.weights_hidden_to_output = np.random.normal(
            0.0, self.output_nodes ** -0.5,
            (self.output_nodes, self.hidden_nodes))
        self.lr = learning_rate

        #### Set this to your implemented sigmoid function ####
        # Sigmoid activation used on the hidden layer only.
        self.activation_function = lambda x: 1 / (1 + np.exp(-x))

    def train(self, inputs_list, targets_list):
        """Run one forward + backward pass and update the weights in place.

        Args:
            inputs_list: feature values for a single record.
            targets_list: target value(s) for that record.
        """
        # Column vectors: (n_inputs, 1) and (n_outputs, 1).
        x = np.array(inputs_list, ndmin=2).T
        t = np.array(targets_list, ndmin=2).T

        ### Forward pass ###
        hidden_out = self.activation_function(self.weights_input_to_hidden @ x)
        # Output unit is linear: f(z) = z.
        y_hat = self.weights_hidden_to_output @ hidden_out

        ### Backward pass ###
        # f'(z) = 1 for the linear output, so the delta is just the error.
        out_err = t - y_hat
        # Error backpropagated to the hidden layer, scaled by sigmoid'(h).
        hid_err = self.weights_hidden_to_output.T @ out_err
        hid_grad = hid_err * hidden_out * (1 - hidden_out)

        # Gradient-descent weight updates.
        self.weights_hidden_to_output += self.lr * (out_err @ hidden_out.T)
        self.weights_input_to_hidden += self.lr * (hid_grad @ x.T)

    def run(self, inputs_list):
        """Forward pass only; return the network's prediction(s)."""
        x = np.array(inputs_list, ndmin=2).T
        hidden_out = self.activation_function(self.weights_input_to_hidden @ x)
        # Linear output unit: prediction equals the weighted hidden activations.
        return self.weights_hidden_to_output @ hidden_out
def MSE(y, Y):
    """Mean squared error between predictions ``y`` and targets ``Y``."""
    return np.square(y - Y).mean()
"""
Explanation: Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.
Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.
2. Implement the forward pass in the train method.
3. Implement the backpropagation algorithm in the train method, including calculating the output error.
4. Implement the forward pass in the run method.
End of explanation
"""
import sys

### Set the hyperparameters here ###
epochs = 700
learning_rate = 0.05
hidden_nodes = 10
output_nodes = 1

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

# Track loss on both sets so we can plot the learning curves afterwards.
losses = {'train': [], 'validation': []}
for e in range(epochs):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    # FIX: `.ix` was removed in pandas 1.0; `.loc` is the label-based equivalent.
    for record, target in zip(train_features.loc[batch].values,
                              train_targets.loc[batch, 'cnt']):
        network.train(record, target)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
    sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)

plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
# FIX: the `ymax` keyword was removed from plt.ylim in matplotlib 3.x; use `top`.
plt.ylim(top=0.5)
"""
Explanation: Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
Choose the number of epochs
This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting.
Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
End of explanation
"""
fig, ax = plt.subplots(figsize=(8,4))

# Undo the feature scaling so predictions are in the original units (ride counts).
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
# FIX: `predictions` has shape (1, n_obs), so len(predictions) == 1 and would
# squash the x-axis; the number of plotted points is predictions.shape[1].
ax.set_xlim(right=predictions.shape[1])
ax.legend()

# FIX: `.ix` was removed in pandas 1.0; `.loc` is the label-based equivalent.
dates = pd.to_datetime(rides.loc[test_data.index, 'dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
# Label one tick per day (data is hourly; noon of each day = index 12, step 24).
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
"""
Explanation: Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
End of explanation
"""
import unittest

# Fixed input/target record and weight matrices used to check the network's
# forward pass and one-step weight update against precomputed values.
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
                       [-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])

class TestMethods(unittest.TestCase):
    """Unit tests for data loading and the NeuralNetwork implementation."""

    ##########
    # Unit tests for data loading
    ##########

    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########

    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        # .copy() so the shared test fixtures are not mutated in place.
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        # Expected weights after a single gradient step with lr=0.5.
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328, -0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, 0.39775194, -0.29887597],
                                              [-0.20185996, 0.50074398, 0.19962801]])))

    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))

# Collect and run all the tests defined above.
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
"""
Explanation: Thinking about your results
Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
Note: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
Your answer below
Unit tests
Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project.
End of explanation
"""
|
dmolina/es_intro_python | 03-example_iris.ipynb | gpl-3.0 | from IPython.display import IFrame
IFrame('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', width=300, height=200)
"""
Explanation: Getting started in scikit-learn with the famous iris dataset
From the video series: Introduction to machine learning with scikit-learn
Agenda
What is the famous iris dataset, and how does it relate to machine learning?
How do we load the iris dataset into scikit-learn?
How do we describe a dataset using machine learning terminology?
What are scikit-learn's four key requirements for working with data?
Introducing the iris dataset
50 samples of 3 different species of iris (150 samples total)
Measurements: sepal length, sepal width, petal length, petal width
End of explanation
"""
# import load_iris function from datasets module
from sklearn.datasets import load_iris
# save "bunch" object containing iris dataset and its attributes
iris = load_iris()
type(iris)
# print the iris data
print(iris.data)
"""
Explanation: Machine learning on the iris dataset
Framed as a supervised learning problem: Predict the species of an iris using the measurements
Famous dataset for machine learning because prediction is easy
Learn more about the iris dataset: UCI Machine Learning Repository
Loading the iris dataset into scikit-learn
End of explanation
"""
# print the names of the four features
print(iris.feature_names)
# print integers representing the species of each observation
print(iris.target)
# print the encoding scheme for species: 0 = setosa, 1 = versicolor, 2 = virginica
print(iris.target_names)
"""
Explanation: Machine learning terminology
Each row is an observation (also known as: sample, example, instance, record)
Each column is a feature (also known as: predictor, attribute, independent variable, input, regressor, covariate)
End of explanation
"""
# check the types of the features and response
print(type(iris.data))
print(type(iris.target))
# check the shape of the features (first dimension = number of observations, second dimensions = number of features)
print(iris.data.shape)
# check the shape of the response (single dimension matching the number of observations)
print(iris.target.shape)
# store feature matrix in "X"
X = iris.data
# store response vector in "y"
y = iris.target
"""
Explanation: Each value we are predicting is the response (also known as: target, outcome, label, dependent variable)
Classification is supervised learning in which the response is categorical
Regression is supervised learning in which the response is ordered and continuous
Requirements for working with data in scikit-learn
Features and response are separate objects
Features and response should be numeric
Features and response should be NumPy arrays
Features and response should have specific shapes
End of explanation
"""
|
Mashimo/datascience | 02-Classification/jhu.ipynb | apache-2.0 | import pandas as pd # Start by importing the data
X = pd.read_csv('../datasets/pml-training.csv', low_memory=False)
X.shape
"""
Explanation: LDA: Linear discriminant Analysis
A prediction model for Weight Lifting based on sensors to predict how well an exercise is performed.
Project goal
In this project we will use data from accelerometers on the belt, forearm, arm and dumbell of 6 participants.
They were asked to perform barbell lifts correctly and incorrectly in 5 different ways.
More information is available from the website here: http://groupware.les.inf.puc-rio.br/har (see the section on the Weight Lifting Exercise Dataset).
The goal of the project is to predict the manner in which they did the exercise. This is the “classe” variable in the data set: A - E
- exactly according to the specification (Class A)
- throwing the elbows to the front (Class B)
- lifting the dumbbell only halfway (Class C)
- lowering the dumbbell only halfway (Class D)
- throwing the hips to the front (Class E).
Read the data
Velloso, E.; Bulling, A.; Gellersen, H.; Ugulino, W.; Fuks, H.: Qualitative Activity Recognition of Weight Lifting Exercises. Proceedings of 4th International Conference in Cooperation with SIGCHI (Augmented Human '13). Stuttgart, Germany: ACM SIGCHI, 2013 (http://groupware.les.inf.puc-rio.br/work.jsf?p1=11201).
End of explanation
"""
X.columns
"""
Explanation: the data set has 19622 obs. (rows) of 160 variables (columns)
End of explanation
"""
X.rename(columns = {X.columns[0] : 'ID'}, inplace = True)
X.head(3)
"""
Explanation: This is not really necessary but I like to rename the first column from “X” to a meaningful name:
End of explanation
"""
X.info()
X.describe()
"""
Explanation: Explore the data
End of explanation
"""
X.classe.hist();
"""
Explanation: This is how the five classes are distributed:
End of explanation
"""
y = X.classe.copy() # copy “y” column (the target) values out
X.drop(['classe'], axis=1, inplace=True) # then, drop y column
"""
Explanation: Classes are almost uniformly distributed, with a small prevalence for A-class.
Preprocess the data
End of explanation
"""
columnsToDelete = ['ID','user_name', 'raw_timestamp_part_1', 'raw_timestamp_part_2','cvtd_timestamp', 'new_window', 'num_window']
X.drop(columnsToDelete, axis = 1, inplace=True)
"""
Explanation: Feature extraction
160 features are computationally expensive for the model training, so we aim to reduce them.
Remove user- and time-dependent features
First of all, it’s clear from the summary above that the first six variables have no use, since they are user-dependent and time-dependent; so we remove them:
End of explanation
"""
X.isnull().values.any()
"""
Explanation: Remove features with missing values
End of explanation
"""
X.dropna(axis=1, inplace=True)
X.shape
"""
Explanation: From the statement above, some variables have missing values (NaN).
We could substitute them with the average for that variable but there are many missing values and this is not improving the model accuracy (I tried).
Instead of less-accurate imputation of missing data, I just remove all predictors with NA values. Hard but fair …
End of explanation
"""
X.head(3)
"""
Explanation: Finally we have reduced the dataset to 52 features, less than one third.
End of explanation
"""
from sklearn.feature_selection import VarianceThreshold
selector = VarianceThreshold()
selector.fit(X)
"""
Explanation: Remove features with near zero variance
There is something else we can do: drop features which have a variance near zero meaning they don’t provide enough value for predictions.
We will use the sklearn module VarianceThreshold to find out which features have big enough variance.
End of explanation
"""
mask = selector.variances_ > 0.5 # arbitrary value 0.5
"""
Explanation: Find all features with variance larger than 0.5 (you can tune it):
End of explanation
"""
X_hv = X.loc[:, mask == True]
X_hv.shape
totalObs = X_hv.shape[0] # this will soon come useful
totalObs
"""
Explanation: Copy all "high variance" features into a new dataframe x_hv:
End of explanation
"""
from sklearn import preprocessing
normaliser = preprocessing.Normalizer()
# this will keep the columns names
xNormalised = pd.DataFrame(normaliser.fit_transform(X_hv), columns = X_hv.columns)
"""
Explanation: Another 6 features removed.
A final reduction could be to remove all the features that have a high correlation between them, which I skip.
Normalisation
LDA (and in general other classification models) works better if the data set is normalised.
End of explanation
"""
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(xNormalised, y, test_size=0.2, random_state=7)
"""
Explanation: Split the data into training and testing
For each model I will measure the out of sample error that is the error rate you get on a new data set.
The purpose of using a different data set than training is model checking. I want to validate how well the model got trained.
I will calculate the out of sample error by looking at the accuracy.
End of explanation
"""
y.value_counts() # frequency of each category
"""
Explanation: Baseline prediction
A baseline is needed to see if any model trained is really that useful. Can it beat the simplest baseline?
Our baseline will be deciding randomly the "classe" category based on the frequency in the training set (classe A is 28% of the times, classe B is 19% and so on …)
The baseline accuracy would be around 0.2, as there are five classes to choose.
Any model with a higher accuracy than the baseline is a better model.
End of explanation
"""
def baselinePredict(aNumber):
    """Map a random draw in [0, 1) to a class label using the training-set
    class frequencies (counts hard-coded from y.value_counts())."""
    # aNumber: expects a number between 0 and 1
    # totalObs is a global variable
    cumulative = 0
    for count, label in ((5580, 'A'), (3797, 'B'), (3607, 'C'), (3422, 'D')):
        cumulative += count
        if aNumber < cumulative / totalObs:
            return label
    # Anything beyond the first four cumulative frequencies is class E.
    return 'E'
"""
Explanation: Let's get the baseline by hard-coding the frequencies of the classes usage:
End of explanation
"""
import random
test = random.random() # a number between 0 and 1
baselinePredict(test)
yTotals = ytest.count()
correctPredictions = 0
for i in range(yTotals):
rndPrediction = baselinePredict(random.random())
if (rndPrediction == ytest.iloc[i]):
correctPredictions += 1
print("Percentage of correct predictions for all test dataset: ", correctPredictions / yTotals)
"""
Explanation: Let's try a test:
End of explanation
"""
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
modelOneVsAll = OneVsRestClassifier(SVC(kernel='linear'))
import time # to measure the time needed for training
print ("Training the One versus All model ...")
s = time.time()
modelOneVsAll.fit(Xtrain, ytrain)
print("Done! Completed in: ", time.time() - s, "seconds")
modelOneVsAll.score(Xtest, ytest)
"""
Explanation: This is our baseline. As it's random calculated, it varies; usually between 0.2 and 0.22 circa.
Now we have everything to start training our models!
Multi-class logistic: One vs All
We now train a one-versus-all model.
End of explanation
"""
from sklearn.ensemble import ExtraTreesClassifier
tree = ExtraTreesClassifier()
tree.fit(Xtrain, ytrain)
feat_importances = pd.Series(tree.feature_importances_, index=Xtest.columns)
feat_importances.nlargest(4)
"""
Explanation: The model scores a better accuracy than the baseline but it took a long time.
Using a multinomial logistic regression holds similar accuracy results.
Reduce even more the features
One possibility would be to apply the LR model to a very reduced subset of features.
To do that we need first to find out which are the most important features.
As we remember a good way to have them is to use a decision tree. Let's use the tree as a shortcut to find the most important features.
Train a decision tree
End of explanation
"""
top4features = ['pitch_forearm', 'yaw_belt', 'magnet_dumbbell_z', 'roll_dumbbell']
Xtrain_reduced = Xtrain[top4features].copy()
Xtrain_reduced.shape
Xtest_reduced = Xtest[top4features].copy()
print ("Training the OneVsAll model on 4 features ...")
s = time.time()
modelOneVsAll.fit(Xtrain_reduced, ytrain)
print("Done! Completed in: ", time.time() - s, "seconds")
modelOneVsAll.score(Xtest_reduced, ytest)
"""
Explanation: Re-train the OneVsAll model with only the top four features.
Now that we have the top 4 features, we can train again the model only with them:
End of explanation
"""
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis # LDA
reduceLDA = LinearDiscriminantAnalysis(n_components = 4)
reduceLDA.fit(Xtrain, ytrain)
XtrainLDAred = reduceLDA.transform(Xtrain)
XtestLDAred = reduceLDA.transform(Xtest)
"""
Explanation: Well, the time greatly reduced but the score is now very low, barely better than the baseline ...
Not so useful. How to do?
Reduce the features using LDA
LDA comes to rescue in the form of a data reduction similar to PCA. The LDA model can be applied to the dataset and transform it into a smaller set of components, between 1 and the number of classes minus 1 .
Since we have 5 classes we can reduce up to 4 components (this is why we choose earlier 4 top features)
End of explanation
"""
print ("Training the One Vs All model on reduced 4-components dataset ...")
s = time.time()
modelOneVsAll.fit(XtrainLDAred, ytrain)
print("Done! Completed in: ", time.time() - s, "seconds")
modelOneVsAll.score(XtestLDAred, ytest)
"""
Explanation: And now we can again apply the OneVsAll model but this time to the reduced dataset:
End of explanation
"""
modelLDA = LinearDiscriminantAnalysis()
print ("Training the LDA model ...")
s = time.time()
modelLDA.fit(Xtrain, ytrain)
print("Done! Completed in: ", time.time() - s, "seconds")
print("LDA Model accuracy:", modelLDA.score(Xtest,ytest))
"""
Explanation: The score is now similar to the one using the entire dataset but the training is much faster.
This shows how the LDA data reduction can help to have good results and times by applying the model on a reduced dataset.
LDA Model
But LDA can do more: it can also be used as a classifier directly!
We will train it (and time this) on the original dataset, not the reduced one; the LDA classifier will automatically reduce the dataset.
End of explanation
"""
tree.score(Xtest, ytest)
"""
Explanation: Great! The LDA model is much better than the baseline and similar to the OneVsAll results plus it was quite fast.
Note: 70% of correct predictions is not exactly perfect but it was not the goal of this exercise: you can get almost perfect results using decision trees or random forests.
End of explanation
"""
|
johnpfay/environ859 | 07_DataWrangling/notebooks/02-Numpy-with-FeatureClasses.ipynb | gpl-3.0 | #Import arcpy and numpy
import arcpy
import numpy as np
#Point to the HUC12.shp feature class in the Data folder
huc12_fc = '../Data/HUC12.shp'
print arcpy.Exists(huc12_fc)
"""
Explanation: Using NumPy with ArcGIS: FeatureClass to Numpy
Demonstrates manipulation of feature class attribute data using Numpy. By no means is this an in-depth introduction, let alone discussion, of NumPy, but it does at least familiarize you with what NumPy is about and how it can be used with ArcGIS feature classes. The links below provide more in-depth reading on NumPy and how it's used with feature classes.
https://jakevdp.github.io/PythonDataScienceHandbook/index.html#2.-Introduction-to-NumPy
http://desktop.arcgis.com/en/arcmap/latest/analyze/arcpy-data-access/featureclasstonumpyarray.htm
End of explanation
"""
#List the fields we want to convert
fieldList = ["SHAPE@XY","HUC_8","HUC_12","ACRES"]
arrHUCS = arcpy.da.FeatureClassToNumPyArray(huc12_fc,fieldList)
"""
Explanation: Here,we convert the feature class to a NumPy array using ArcPy's FeatureClassToNumPyArray function
End of explanation
"""
#What is the type of the arrHUCs variable and how many records does it contain
print type(arrHUCS)
print arrHUCS.size
#What are the data types stored in this array
print arrHUCS.dtype
#Or, just what are the names of the "columns"
print arrHUCS.dtype.names
#Show the first row of data
print arrHUCS[0]
#Show the first 5 rows of data
print arrHUCS[0:5]
#Show the HUC8 value of the 5th row
print arrHUCS[4]['HUC_8']
#List all the HUC12s
print arrHUCS['HUC_12']
#List the mean area of all HUCs
print arrHUCS['ACRES'].mean()
"""
Explanation: As a NumPy array, we can do different operations on the feature class. But first, let's inspect the array's properties.
End of explanation
"""
#First we make a boolean mask and show the first 10 records
arrMask = (arrHUCS["HUC_8"] == '03040103')
arrMask[:10]
#Now we apply the mask to isolate record where this is true
arrSelectedHUC8 = arrHUCS[arrMask]
#The original array had 201 records, how many records does this have?
print arrSelectedHUC8.size
#Print the first 10 rows
arrSelectedHUC8[10]
#Calculate the mean area of these HUCs
arrSelectedHUC8['ACRES'].mean()
#Plot a historam of HUC_12 areas
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn; seaborn.set() # set plot style
plt.hist(arrHUCS['ACRES']);
plt.title('Area Distribution of HUC_12s')
plt.xlabel('Area (acres)')
plt.ylabel('number');
"""
Explanation: We can also subset records in our array which we will do as a two step process. First we create a boolean mask array, that is an array of true and false values where a record is true if a condition is met. Then we apply this mask to our original array to isolate records where the mask is true
End of explanation
"""
|
FRidh/pstd | examples/basic_example.ipynb | bsd-3-clause | import sys
sys.path.append("..")
import numpy as np
from pstd import PSTD, PML, Medium, PointSource
from acoustics import Signal
#import seaborn as sns
%matplotlib inline
"""
Explanation: Basic example
In this notebook we show how to perform a basic simulation.
End of explanation
"""
x = 30.0
y = 20.0
z = 0.0
soundspeed = 343.2
density = 1.296
maximum_frequency_target = 200.0
"""
Explanation: Configuration
The following are the parameters for our simulation
End of explanation
"""
medium = Medium(soundspeed=soundspeed, density=density)
"""
Explanation: Create model
We now create a model. Waves propagate in a medium which we define first.
End of explanation
"""
pml = PML(absorption_coefficient=(1000.0, 1000.0), depth=10.0)
"""
Explanation: The model is only finite and to prevent aliasing we need a Perfectly Matched Layer.
End of explanation
"""
model = PSTD(
maximum_frequency=maximum_frequency_target,
pml=pml,
medium=medium,
cfl=None,
size=[x, y]
)
"""
Explanation: Now we create the actual model.
End of explanation
"""
source_position = (x/4.0, y/2.0)
source = model.add_object('source', 'PointSource', position=source_position,
excitation='pulse', quantity='pressure', amplitude=0.1)
"""
Explanation: In this example our source excites a pulse.
End of explanation
"""
receiver_position = (x*3.0/4.0, y/2.0)
receiver = model.add_object('receiver', 'Receiver', position=receiver_position, quantity='pressure')
"""
Explanation: We also add a receiver on the other side of the domain
End of explanation
"""
print(model.overview())
"""
Explanation: Check model
To get a quick overview of all parameters, for example to check them, we can print one
End of explanation
"""
_ = model.plot_scene()
"""
Explanation: To check whether the geometry is as we want it to be, we can simply draw it.
End of explanation
"""
model.run(seconds=0.002)
"""
Explanation: Running the simulation
Now that we've defined and checked our model we can run it.
With model.run() you can specify the amount of time steps or amount of seconds it should run.
End of explanation
"""
_ = model.plot_field()
"""
Explanation: Let's see how the sound pressure field looks like now.
End of explanation
"""
%%prun
model.run(seconds=0.06)
"""
Explanation: It might happen that you realize that you actually need to calculate a bit further. This can easily be done, since the state is remembered. Simply use model.run() again and the simulation continues.
End of explanation
"""
_ = model.plot_field()
"""
Explanation: as you can see.
End of explanation
"""
_ = receiver.recording().plot()
"""
Explanation: Recordings
The receivers can record a quantity at a specific location. In this case, we're measuring an impulse response. The method receiver.recording() returns an instance of acoustics.Signal.
End of explanation
"""
model.restart()
"""
Explanation: If however, you want to restart the simulation you can do so with model.restart().
End of explanation
"""
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
"""
Explanation: Show log
Some simulations can take a very long time. To check how far the simulation is, you can check the log.
End of explanation
"""
model.run(steps=10)
_ = model.plot_field()
"""
Explanation: When we now run the simulation, you will see which step it is at.
End of explanation
"""
|
toddstrader/deep-learning | language-translation/dlnd_language_translation.ipynb | mit | """
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
"""
Explanation: Language Translation
In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
Get the Data
Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    """
    Convert source and target text to proper word ids.

    Each newline-separated sentence becomes one list of word ids.  Every
    target sentence additionally gets the '<EOS>' id appended so the network
    can learn where a sentence should end.

    :param source_text: String that contains all the source text.
    :param target_text: String that contains all the target text.
    :param source_vocab_to_int: Dictionary to go from the source words to an id
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: A tuple of lists (source_id_text, target_id_text)
    """
    source_id_text = [[source_vocab_to_int[word] for word in line.split()]
                      for line in source_text.split('\n')]
    # Append <EOS> to every target sentence so the decoder can predict the end.
    eos_id = target_vocab_to_int['<EOS>']
    target_id_text = [[target_vocab_to_int[word] for word in line.split()] + [eos_id]
                      for line in target_text.split('\n')]
    return source_id_text, target_id_text
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
"""
Explanation: Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of each sentence from target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
python
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Check the Version of TensorFlow and Access to GPU
This will check to make sure you have the correct version of TensorFlow and access to a GPU
End of explanation
"""
def model_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate, keep probability)
    """
    # TODO: Implement Function
    # Per the spec above (TF 1.x API): "input" and targets are rank-2 int32
    # placeholders, learning rate and "keep_prob" are rank-0 float32
    # placeholders; only "input" and "keep_prob" need explicit names.
    return None, None, None, None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
- model_inputs
- process_decoding_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model
Input
Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
Targets placeholder with rank 2.
Learning rate placeholder with rank 0.
Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
Return the placeholders in the following tuple (Input, Targets, Learning Rate, Keep Probability)
End of explanation
"""
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for decoding.

    The decoder must be fed the '<GO>' id followed by all but the last word
    id of each target sequence.
    :param target_data: Target Placeholder
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param batch_size: Batch Size
    :return: Preprocessed target data
    """
    # TODO: Implement Function
    # Hint: drop the last column of target_data (e.g. tf.strided_slice), then
    # tf.concat a [batch_size, 1] tensor filled with target_vocab_to_int['<GO>']
    # in front of it.
    return None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_decoding_input(process_decoding_input)
"""
Explanation: Process Decoding Input
Implement process_decoding_input using TensorFlow to remove the last word id from each batch in target_data and concat the GO ID to the beginning of each batch.
End of explanation
"""
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
    """
    Create encoding layer.
    :param rnn_inputs: Inputs for the RNN
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param keep_prob: Dropout keep probability
    :return: RNN state
    """
    # TODO: Implement Function
    # Hint: stack num_layers RNN cells of size rnn_size (each wrapped in a
    # DropoutWrapper with keep_prob), run them with tf.nn.dynamic_rnn as the
    # spec above requires, and return only the final state.
    return None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
"""
Explanation: Encoding
Implement encoding_layer() to create a Encoder RNN layer using tf.nn.dynamic_rnn().
End of explanation
"""
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
                         output_fn, keep_prob):
    """
    Create a decoding layer for training.
    :param encoder_state: Encoder State
    :param dec_cell: Decoder RNN Cell
    :param dec_embed_input: Decoder embedded input
    :param sequence_length: Sequence Length
    :param decoding_scope: TenorFlow Variable Scope for decoding
    :param output_fn: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: Train Logits
    """
    # TODO: Implement Function
    # Hint (TF 1.0 contrib API, per the spec above): build the decoder
    # function with tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state),
    # run it with tf.contrib.seq2seq.dynamic_rnn_decoder inside decoding_scope,
    # apply dropout with keep_prob, then pass the outputs through output_fn.
    return None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
"""
Explanation: Decoding - Training
Create training logits using tf.contrib.seq2seq.simple_decoder_fn_train() and tf.contrib.seq2seq.dynamic_rnn_decoder(). Apply the output_fn to the tf.contrib.seq2seq.dynamic_rnn_decoder() outputs.
End of explanation
"""
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
                         maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
    """
    Create a decoding layer for inference.
    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN Cell
    :param dec_embeddings: Decoder embeddings
    :param start_of_sequence_id: GO ID
    :param end_of_sequence_id: EOS Id
    :param maximum_length: Maximum length of the decoded sequences
    :param vocab_size: Size of vocabulary
    :param decoding_scope: TensorFlow Variable Scope for decoding
    :param output_fn: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: Inference Logits
    """
    # TODO: Implement Function
    # Hint (TF 1.0 contrib API, per the spec above): build the decoder
    # function with tf.contrib.seq2seq.simple_decoder_fn_inference(...) and
    # run it with tf.contrib.seq2seq.dynamic_rnn_decoder inside decoding_scope.
    return None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
"""
Explanation: Decoding - Inference
Create inference logits using tf.contrib.seq2seq.simple_decoder_fn_inference() and tf.contrib.seq2seq.dynamic_rnn_decoder().
End of explanation
"""
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
                   num_layers, target_vocab_to_int, keep_prob):
    """
    Create decoding layer.
    :param dec_embed_input: Decoder embedded input
    :param dec_embeddings: Decoder embeddings
    :param encoder_state: The encoded state
    :param vocab_size: Size of vocabulary
    :param sequence_length: Sequence Length
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param keep_prob: Dropout keep probability
    :return: Tuple of (Training Logits, Inference Logits)
    """
    # TODO: Implement Function
    # Hint (per the spec above): build the decoder RNN cell from rnn_size and
    # num_layers, define output_fn as a lambda producing class logits, then
    # call decoding_layer_train(...) and decoding_layer_infer(...) under a
    # shared tf.variable_scope so the two paths reuse the same weights.
    return None, None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
"""
Explanation: Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Create RNN cell for decoding using rnn_size and num_layers.
Create the output function using a lambda to transform its input, logits, to class logits.
Use the your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob) function to get the inference logits.
Note: You'll need to use tf.variable_scope to share variables between training and inference.
End of explanation
"""
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
    """
    Build the Sequence-to-Sequence part of the neural network.
    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch Size
    :param sequence_length: Sequence Length
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training Logits, Inference Logits)
    """
    # TODO: Implement Function
    # Hint (per the spec above): embed input_data, pass it through
    # encoding_layer(...), preprocess target_data with
    # process_decoding_input(...), embed the result, and feed everything to
    # decoding_layer(...).
    # NOTE(review): the docstring promises a (train_logits, inference_logits)
    # tuple, but this stub returns a single None.
    return None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
Apply embedding to the input data for the encoder.
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).
Process target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function.
Apply embedding to the target data for the decoder.
Decode the encoded input using your decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob).
End of explanation
"""
# Number of Epochs
epochs = None
# Batch Size
batch_size = None
# RNN Size
rnn_size = None
# Number of Layers
num_layers = None
# Embedding Size
encoding_embedding_size = None
decoding_embedding_size = None
# Learning Rate
learning_rate = None
# Dropout Keep Probability
keep_probability = None
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set num_layers to the number of layers.
Set encoding_embedding_size to the size of the embedding for the encoder.
Set decoding_embedding_size to the size of the embedding for the decoder.
Set learning_rate to the learning rate.
Set keep_probability to the Dropout keep probability
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import time
def get_accuracy(target, logits):
    """
    Calculate accuracy of predicted word ids against the target ids.

    Pads the shorter of the two along the sequence axis so shapes agree,
    then compares argmax(logits) to target element-wise.

    :param target: int array of shape (batch, seq) with the true word ids
    :param logits: float array of shape (batch, seq, vocab) with class scores
    :return: mean fraction of matching positions (float in [0, 1])
    """
    max_seq = max(target.shape[1], logits.shape[1])
    if max_seq - target.shape[1]:
        # BUGFIX: the original padded the global `target_batch` (with a 3-axis
        # pad spec) instead of the 2-D `target` parameter.
        target = np.pad(
            target,
            [(0, 0), (0, max_seq - target.shape[1])],
            'constant')
    if max_seq - logits.shape[1]:
        # BUGFIX: the original tested the global `batch_train_logits` here.
        logits = np.pad(
            logits,
            [(0, 0), (0, max_seq - logits.shape[1]), (0, 0)],
            'constant')
    return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
"""
Explanation: Save Parameters
Save the batch_size and save_path parameters for inference.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def sentence_to_seq(sentence, vocab_to_int):
    """
    Convert a sentence to a sequence of ids.

    The sentence is lower-cased and split on whitespace; any word that is
    not in the vocabulary is mapped to the '<UNK>' id.

    :param sentence: String
    :param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids
    """
    unk_id = vocab_to_int['<UNK>']
    return [vocab_to_int.get(word, unk_id) for word in sentence.lower().split()]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
"""
Explanation: Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary, to the <UNK> word id.
End of explanation
"""
translate_sentence = 'he saw a old yellow truck .'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
"""
Explanation: Translate
This will translate translate_sentence from English to French.
End of explanation
"""
|
chi-hung/notebooks | Crawling_Basics.ipynb | mit | soup = BeautifulSoup('<b class="boldest">Extremely bold</b>',"html.parser")
tag = soup.b
type(tag)
"""
Explanation: Now, I'm going to learn Beautiful Soup:
Tag:
End of explanation
"""
print tag.name
print tag["class"]
print tag.attrs
"""
Explanation: A tag has a name (say, "someTag"). It contains a set of attribute:value(s). The following is an example of the structure of a tag:
$\text{<someTag attr1="value" attr2="value1 value2"> A String </someTag>}$
End of explanation
"""
print type(tag.string)
print tag.string
"""
Explanation: i.e. we can use .attrs to show the dictionary of a specified tag (attr, value).
Also, the things within the begin and the end tag (i.e. within $<b>$ and $</b>$) are strings:
End of explanation
"""
print tag.string.parent # a NavigableString obj
print unicode(tag.string.parent) # a unicode string
print repr(unicode(tag.string.parent)) # a unicode string (in repr(), if the string is "unicode" encoded, it will begin by u')
#check the types of the above stuffs:
print type(tag.string.parent)
print type(unicode(tag.string.parent))
print type(repr(unicode(tag.string.parent)))
"""
Explanation: These strings are objects of the type NavigableString. i.e. we can do further actions such as to find its direct parent:
End of explanation
"""
print tag.string.parent.parent
"""
Explanation: or find its parent's parent(now we have the easiest situation, i.e. they have only one direct parent):
End of explanation
"""
css_soup = BeautifulSoup('<p class="body strikeout"></p>')
print css_soup.p['class']
# ["body", "strikeout"]
css_soup = BeautifulSoup('<p class="body strikeout"></p>', "lxml")
print css_soup.p['class']
"""
Explanation: The most common multi-valued attribute is class:
End of explanation
"""
id_soup = BeautifulSoup('<p id="my id"></p>')
id_soup.p['id']
# 'my id'
"""
Explanation: on the other hand, id is not a multi-valued attribute:
End of explanation
"""
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were</p>
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'html.parser')
"""
Explanation: Now, let's see a slightly complex situation:
End of explanation
"""
print soup.prettify()
print soup.p
"""
Explanation: First of all, let's see the prettified structure:
End of explanation
"""
# Iterate over every <p> tag and inspect its name, attributes and text.
# tag.string is None when the tag holds more than one child node.
for tag in soup.find_all("p"):
    print tag
    print tag.name
    print tag.attrs
    print tag["class"]
    print type(tag["class"][0])
    print tag.string
    print "==================================================================================================="
"""
Explanation: the "find_all" method, as well as tag.name, tag.attrs, tag.string, tag['attrName']
apparently, the above is not what I want. I actually would like to obtain all the tags which is labeled as "p". This can be achieved by taking the advantage of the method "find_all":
End of explanation
"""
# soup.strings yields every NavigableString in the document;
# .encode("ascii") turns each unicode string into a byte string.
for string in soup.strings:
    print(repr(string))
    print repr(string.encode("ascii"))
    print
"""
Explanation: move from unicode to ascii:
End of explanation
"""
for string in soup.stripped_strings:
print(repr(string.encode("ascii")))
"""
Explanation: So many spaces there. The spaces can be removed via stripping the strings:
End of explanation
"""
[repr(string.encode("ascii")) for string in soup.stripped_strings]
print soup.prettify()
"""
Explanation: Well, we could also put all the stripped strings into a list:
End of explanation
"""
link = soup.a
print link
print
# <a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>
for parent in link.parents:
if parent is None:
print(parent)
else:
print(parent.name)
"""
Explanation: parents and descendents
End of explanation
"""
print soup.name
for child in soup.children:
print child
"""
Explanation: We've successfully found the parent of the specified tag. This can be verified by seeing the structure obtained from the method soup.prettify().
End of explanation
"""
print soup.body.name
print type(soup.body.descendants)
for child in soup.body.descendants:
print child
"""
Explanation: The above result is understandable since from the method soup.prettify() we know already that $<html>$ and $<p>$ are the direct children of the parent $[document]$.
Now, let's see its descendants:
End of explanation
"""
for single_tag in soup.find_all("p"):
for string in single_tag:
print string
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'html.parser')
print soup.find_all("p")[0]
print soup.find_all("p")[0].get("class")
print soup.find_all('a')
for link in soup.find_all('a'):
print type(link)
print(link.get('href'))
"""
Explanation: Say, I'd like to get all the strings of all the "p" tags. How to do this? Let's see:
End of explanation
"""
a = [1, 2, 3, 4]
b=123
print type(a.__iter__)
print type(a.__init__)
print type(b.__init__)
print type(b.__iter__)
"""
Explanation: generator and iterator
I now have the problem about the type "generator". I'd like to understand both the types "generator" and "iterator" in Python better.
End of explanation
"""
# Using the generator pattern (an iterable)
class firstn(object):
    """Iterator over the integers 0, 1, ..., n-1 (manual iterator protocol)."""
    def __init__(self, n):
        self.n = n
        self.num, self.nums = 0, []
    def __iter__(self):
        # An iterator is its own iterable.
        return self
    # Python 3 compatibility: __next__ simply delegates to next().
    def __next__(self):
        return self.next()
    def next(self):
        # Guard clause: signal exhaustion once the counter reaches n.
        if self.num >= self.n:
            raise StopIteration()
        current = self.num
        self.num = current + 1
        return current
print type(firstn(3))
for j in firstn(3):
print j
print
a=firstn(3)
for _ in range(3):
print a.next()
"""
Explanation: When Python executes the for loop, it first invokes the $iter()$ method of the container to get the iterator of the container. It then repeatedly calls the next() method $next()$ method in Python 3.x) of the iterator until the iterator raises a StopIteration exception. Once the exception is raised, the for loop ends.
which means that a list is iterable. More details see: http://www.shutupandship.com/2012/01/understanding-python-iterables-and.html
Let's quote the summary from that site (written by Praveen Gollakota):
If you define a custom container class, think about whether it should also be an iterable.
It is quite easy to make a class support the iterator protocol.
Doing so will make the syntax more natural.
If I can't recall what the above summary says or how to make a class iterable in the future, I'll visit that website again.
Now, let's continue. What is a generator in Python?
End of explanation
"""
def firstn(n):
    """Generator equivalent of the firstn iterator class.

    Yields the integers 0, 1, ..., n-1; yields nothing when n <= 0.
    """
    current = 0
    while True:
        if current >= n:
            return  # ends the generator (StopIteration is raised for the caller)
        yield current
        current += 1
a=firstn(3)
for a in firstn(3):
print a
"""
Explanation: Well, this I understand. The above is nothing but the concept of the iterator.
(https://wiki.python.org/moin/Generators)
Python provides generator functions as a convenient shortcut to building iterators. Lets us rewrite the above iterator as a generator function:
End of explanation
"""
print [soup.find_all("p")[j].string for j in range(3)][1].parent.parent
"""
Explanation: I got it. In this way, the iterator can be built more easily. That's it. I think 1) the design pattern of the generator is simpler than the design pattern of the iterator, and 2) their behavior should be the same.
Now, due to the string we have got has a special type (NavigableString), we can find the direct parent of these special strings.
End of explanation
"""
for string in soup.strings:
print(repr(string))
print((string))
print(type(string))
print"======================================"
"""
Explanation: Now, let's see if we can print out all the strings of the site:
End of explanation
"""
for string in soup.stripped_strings:
print(repr(string))
"""
Explanation: or the stripped strings(unnecessary spaces are removed)
End of explanation
"""
sibling_soup = BeautifulSoup("<a><b>text1</b><c>text2</c></b></a>","lxml")
print(sibling_soup.prettify())
print sibling_soup.b.next_sibling
print sibling_soup.b.previous_sibling
print sibling_soup.c.next_sibling
print sibling_soup.c.previous_sibling
"""
Explanation: Siblings:
End of explanation
"""
print type(soup.a.next_siblings)
"""
Explanation: back to the soup example. Let's find the siblings of the tag "a" in an iterating way.
End of explanation
"""
for sibling in soup.a.next_siblings:
print(repr(sibling))
"""
Explanation: apparently, it is a generator. So, we can do the following:
End of explanation
"""
a=["aaa","bbb","aac","caa","def"]
print 'aa' in 'aaa'
print map(lambda x:'aa' in x, a)
print filter(lambda x:'aa' in x, a)
"""
Explanation: learn: the function map and filter
End of explanation
"""
print reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
final_site_list = ['http://www.indeed.com/jobs?q=%22','data+scientist', '%22&l=', 'New+York']
print reduce(lambda x,y: x+y,final_site_list)
print "".join(final_site_list)
"""
Explanation: learn: the function reduce
End of explanation
"""
# A Counter can be built directly from a dict of counts...
a=Counter({"a":1,"b":3})
print a
print a["b"]
# ...or filled incrementally: update() adds counts from an iterable.
cnt=Counter()
cnt.update(['red', 'blue', 'red', 'green', 'blue', 'blue'])
print cnt
# Manual equivalent of update(): missing keys default to 0.
cnt = Counter()
for word in ['red', 'blue', 'red', 'green', 'blue', 'blue']:
    cnt[word] += 1
print cnt
# update() accumulates on top of the existing counts (doubles them here).
cnt.update(['red', 'blue', 'red', 'green', 'blue', 'blue'])
print cnt
# items() gives (key, count) pairs (a list in Python 2).
print cnt.items()
print type(cnt.items())
"""
Explanation: learn: the usage of Counter:
End of explanation
"""
# Build a DataFrame from the Counter's (key, count) pairs and bar-plot it.
frame=pd.DataFrame(cnt.items(), columns = ['color', 'numColor'])
ax=frame.plot(x = 'color', kind = 'bar', legend = None,color='green')
ax.set_xlabel("color")
ax.set_ylabel("color counts")
ax.set_ylim(0,7)
# A DataFrame can equally be built from a NumPy array.
np.asarray([[1,2,3,4],[5,6,7,8]])
frame=pd.DataFrame(np.asarray([[1,2,3,4],[5,6,7,8]]), columns = ['01', '02','03','04'])
ax=frame.plot( kind = 'bar')
ax.set_ylim(0,10)
# plot() returns a matplotlib Axes; go through its Figure to save to disk.
fig = ax.get_figure()
fig.savefig("tmp.svg")
frame.tail()
"""
Explanation: some very basic exercises about the pandas dataframe:
It accepts NumPy arrays or dictionary-like inputs. Say, let's import data from a dictionary:
End of explanation
"""
soup.get_text("|",strip=True)
"""
Explanation: the "get_text" method from Beautifulsoup:
End of explanation
"""
# splitlines:
# (code example from "https://www.tutorialspoint.com/python/string_splitlines.htm)
str = "Line1-a b c d e f\nLine2- a b c\n\nLine4- a b c d";
print str.splitlines( ) # the line will break into lines according to the line break \n
print str.splitlines(1) # line breaks will be included within the splitted string
# strip:
# (code example from "https://www.tutorialspoint.com/python/string_strip.htm")
print repr("0000000this is string example....\nwow!!!0000000".strip('0')) # the chars"0" at the both ends
# of the string will be removed.
print repr(" 0000000this is string example....\nwow!!!0000000 ".strip()) # the empty spaces will be removed.
print '1,,2'.split(',')
print '1,,2 345'.split()
"""
Explanation: get to know the Python built-in methods "strip", "splitlines" and "split":
End of explanation
"""
print "-----------------------------------"
print "tests of 're.match':"
print "-----------------------------------"
m=re.match(r'(bcd){2}',"bcdbcd")
print "re:(bcd){2} string: bcdbcd","match:",repr(m.group())
m=re.match(r'[a-zA-Z][3]{2}',"a33")
print "re:[a-zA-Z][3]{2} string: a33","match:",repr(m.group())
m=re.match(r'[a-zA-Z].+3',"f42312d")
print repr(m.group())
print "re:[a-zA-Z].+3 string: f42312d","match:",repr(m.group())
m = re.match(r"(\d+b)(\d{3})", "24b1632")
print "re:(\d+b)(\d{3}) string: 24b1632","match:",repr(m.group())
print "m.groups():",m.groups() # according to the parenthesis in re, the string will be split into different groups.
print "-----------------------------------"
print "tests of 're.match' with try&catch:"
print "-----------------------------------"
try:
m=re.match(r'(d3.js)',">")
print repr(m.group())
except AttributeError:
print "the re and the string does not match!"
except Exception: # catch Exception if AttributeError is not the cause
print "what's happening there?"
try:
m=re.match(r'(d3.js)',">","123454321")
print repr(m.group())
except AttributeError:
print "the re and the string does not match!"
except Exception: # catch Exception if AttributeError is not the cause
print "Oops, something wrong!"
print "-----------------------------------"
print "tests of 're.sub':"
print "-----------------------------------"
print "re:\d{2}.* string: 11 2 3 123 abc cde replacement: 00","\nresult:",re.sub(r"\d{2}.*","00", "11 2 3 123 abc cde\n")
print "re:\d{2} string: 11 2 3 123 abc cde replacement: 00","\nresult:",re.sub(r"\d{2}","00", "11 2 3 123 abc cde\n")
# the following line will remove any element of the string
# which is not within this list: [any alphabets(case irrelevant), ., 3, +]
print "re:[^a-zA-Z.3+] string: #c--d++e**1234.5 replacement: '' ","\nresult:",re.sub(r'[^a-zA-Z.3+]',"", "#c--d++e**1234.5\n")
print "-----------------------------------"
print "tests of 're.findall':"
print "-----------------------------------"
print repr(re.findall(r'\d+',"Jobs 1 to 10 of 382"))
"""
Explanation: understand the regular expression in Python (re.match, re.sub, re.findall):
(https://www.tutorialspoint.com/python/python_reg_expressions.htm)
End of explanation
"""
foo = [2, 18, 9, 22, 17, 24, 8, 12, 27]
print filter(lambda x: x % 3 == 0, foo) # from python official doc:
# filter(function, iterable)
# is equivalent to [item for item in iterable if function(item)]
print map(lambda x: x * 2 + 10, foo)
print reduce(lambda x, y: x + y, foo)
print sum(foo)
"""
Explanation: recall:lambda function (http://www.secnetix.de/olli/Python/lambda_functions.hawk)
End of explanation
"""
a = "Free your mind."
b = "Welcome to the desert... of the real."
c = "What is real? How do you define real?"
print(a)
print(a.split())
print
print(b)
print(b.split("o"))
print
print(c)
print(c.split(" ", 4))
print
print '+'.join("abc")
print '+'.join(["a","b","c"])
"""
Explanation: the use of the built-in functions "split" and "join":
End of explanation
"""
def skills_dict(doc_frequency):
    """Tally the data-science skill mentions found in *doc_frequency*.

    doc_frequency maps lower-cased terms to counts (e.g. a Counter built
    from job-posting text).  Returns a single Counter keyed by the
    canonical skill name; skills that never appear are dropped, since
    Counter addition discards non-positive counts.
    """
    # Canonical skill name -> lower-cased term looked up in doc_frequency,
    # grouped by category for readability.
    skill_groups = [
        # programming languages
        {'R': 'r', 'Python': 'python', 'Java': 'java', 'C++': 'c++',
         'Ruby': 'ruby', 'Perl': 'perl', 'Matlab': 'matlab',
         'JavaScript': 'javascript', 'Scala': 'scala'},
        # analysis tools
        {'Excel': 'excel', 'Tableau': 'tableau', 'D3.js': 'd3.js',
         'SAS': 'sas', 'SPSS': 'spss', 'D3': 'd3'},
        # Hadoop ecosystem
        {'Hadoop': 'hadoop', 'MapReduce': 'mapreduce', 'Spark': 'spark',
         'Pig': 'pig', 'Hive': 'hive', 'Shark': 'shark',
         'Oozie': 'oozie', 'ZooKeeper': 'zookeeper',
         'Flume': 'flume', 'Mahout': 'mahout'},
        # databases
        {'SQL': 'sql', 'NoSQL': 'nosql', 'HBase': 'hbase',
         'Cassandra': 'cassandra', 'MongoDB': 'mongodb'},
    ]
    overall_total_skills = Counter()
    for group in skill_groups:
        # Counter addition drops entries whose count is <= 0.
        overall_total_skills = overall_total_skills + Counter(
            {name: doc_frequency[term] for name, term in group.items()})
    return overall_total_skills
def text_cleaner(url):
    """Fetch a job-posting page and reduce it to a list of cleaned words.

    Returns a list of lower-cased words with English stopwords removed, or
    None if the page could not be fetched.  Only letters, '.', '3' and '+'
    survive the cleaning so that terms like "d3.js" and "c++" are kept.
    """
    try:
        session = requests.Session()
        soup = BeautifulSoup(session.get(url, timeout=5).content, 'lxml') # let our beautiful soup to parse the site
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # catching requests.RequestException would be safer.
        print "connection error or something wrong. URL=",url
        return
    for script in soup(["script", "style"]): # Remove these two unnecessary tags: "script" and "style"
        _=script.extract()
    stopwords = nltk.corpus.stopwords.words('english') # a list of words which are not important
    # we will ignore these words if they show up in the context
    text=soup.get_text(" ",strip=True)
    text=re.sub(r"[^a-zA-Z.3+]"," ",text) # preserve . and 3 for "d3.js". Also, preserve "+" for "c++"
    content=[w.strip(".") for w in text.lower().split() if w not in stopwords] # remove any "." if it's contained
    # at the begin or the end of the string
    return content
def skills_info(city = None, state = None):
    """Scrape Indeed for "data scientist" postings in a city and plot skill frequencies.

    Walks every search-result page, cleans each posting's text with
    text_cleaner(), counts key skill mentions with skills_dict(), and saves
    a bar chart of the percentage of postings mentioning each skill to
    "<city>.pdf" ("Nationwide.pdf" when no city is given).
    """
    city_title = city
    if city is None:
        city_title = 'Nationwide'
    # Assemble the Indeed search URL (%22 = quote, %2C = comma).
    final_site_list = ['http://www.indeed.com/jobs?q=%22','data+scientist', '%22&l=', city_title,
                       '%2C+', state]
    final_site = "".join(final_site_list)
    base_URL = "http://www.indeed.com"
    print final_site
    try:
        session = requests.Session()
        soup = BeautifulSoup(session.get(final_site).content,"lxml")
    except:
        print "connection error or something wrong. URL=",final_site
        return
    print soup.find(id = "searchCount")
    # Parse the total job count out of text like "Jobs 1 to 10 of 382".
    # NOTE(review): assumes that exact format; a layout change breaks the
    # index arithmetic below — confirm against the live page.
    num_jobs_area=soup.find(id = "searchCount").string
    job_numbers = re.findall("\d+", num_jobs_area)
    if len(job_numbers) > 3: # Have a total number of jobs greater than 1000
        total_num_jobs = (int(job_numbers[2])*1000) + int(job_numbers[3])
    else:
        total_num_jobs = int(job_numbers[2])
    # ceil(total/10): Indeed shows 10 postings per result page.
    if(total_num_jobs%10==0):
        num_pages = total_num_jobs/10
    else:
        num_pages = 1+total_num_jobs/10
    print "num_pages=",num_pages
    job_descriptions = [] # store all our descriptions in this list
    for i in range(num_pages): # loop through all of our search result pages
    #for i in (0,):
        start_num = str(i*10) # assign the multiplier of 10 to view the pages we want
        current_page = "".join([final_site, "&start=", start_num])
        print "Getting page", i,"start_num=",start_num
        print current_page
        job_link_area = BeautifulSoup(session.get(current_page).content,"lxml") # locate all of the job links within the <body> area
        #join the URL base and the tail part of the URL using urlparse package:
        job_URLs=[urlparse.urljoin(base_URL,link.a.get('href')) for link in job_link_area.select( 'h2[class="jobtitle"]')]
        print job_URLs,len(job_URLs)
        for URL in job_URLs:
            final_description = text_cleaner(URL)
            job_descriptions.append(final_description)
            sleep(1) # so that we don't be jerks. If you have a very fast internet connection you could hit the server a lot!
    # Merge the per-posting word lists into one global frequency table.
    doc_frequency=Counter()
    for item in job_descriptions:
        doc_frequency.update(item) # add all the words to the counter table and count the frequency of each words
    #print doc_frequency.most_common(10)
    print 'Done with collecting the job postings!'
    print 'There were', len(job_descriptions), 'jobs successfully found.'
    # Obtain our key terms and store them in a dict. These are the key data science skills we are looking for
    overall_total_skills=skills_dict(doc_frequency)
    final_frame = pd.DataFrame(overall_total_skills.items(), columns = ['Term', 'NumPostings']) # Convert these terms to a
    # dataframe
    # Change the values to reflect a percentage of the postings
    final_frame.NumPostings = (final_frame.NumPostings)*100/len(job_descriptions) # Gives percentage of job postings
    # having that term
    # Sort the data for plotting purposes
    final_frame.sort_values('NumPostings', ascending = False, inplace = True)
    print final_frame
    today = datetime.date.today()
    # Get it ready for a bar plot
    final_plot = final_frame.plot(x = 'Term', kind = 'bar', legend = None,
                        title = 'Percentage of Data Scientist Job Ads with a Key Skill, '+city_title+', '+str(today))
    final_plot.set_ylabel('Percentage Appearing in Job Ads')
    fig = final_plot.get_figure() # Have to convert the pandas plot object to a matplotlib object
    fig.savefig(city_title+".pdf")
    #return fig,final_frame
def skills_info_TW104():
final_site_list = ['https://www.104.com.tw/jobbank/joblist/joblist.cfm?jobsource=n104bank1&ro=0&keyword=','data+scientist',
'&excludeCompanyKeyword=醫藥+生物+生技+微脂體','&order=2&asc=0','&page=','1']
final_site = "".join(final_site_list)
print final_site
base_URL = "https://www.104.com.tw/"
country="Taiwan"
try:
session = requests.Session()
soup = BeautifulSoup(session.get(final_site).content,"lxml")
except:
print "connection error or something wrong. URL=",final_site
return
#print soup.find(class_="joblist_bar")
num_jobs_area=soup.select('li[class="right"]')
#print type(num_jobs_area)
#print num_jobs_area[0]
total_num_jobs = int( re.findall("\d+", str(num_jobs_area[0]))[0] )
print "num_jobs=",total_num_jobs
if(total_num_jobs%20)==0:
num_pages = total_num_jobs/20
else:
num_pages=1+total_num_jobs/20
print "num_pages=",num_pages
job_descriptions = [] # store all our descriptions in this list
for i in range(1,num_pages+1): # loop through all of our search result pages
#for i in (1,):
start_num = str(i)
final_site_list = final_site_list[:-1]
final_site = "".join(final_site_list)
current_page = "".join([final_site, start_num])
print "Getting page", i
print current_page
job_link_area = BeautifulSoup(session.get(current_page).content,"lxml") # locate all of the job links within the <body> area
#join the URL base and the tail part of the URL using urlparse package:
job_URLs=[urlparse.urljoin(base_URL,link.a.get('href')) for link in job_link_area.select('div[class="jobname_summary job_name"]')]
print job_URLs,len(job_URLs)
for URL in job_URLs:
final_description = text_cleaner(URL)
job_descriptions.append(final_description)
sleep(1) # so that we don't be jerks. If you have a very fast internet connection you could hit the server a lot!
doc_frequency=Counter()
for item in job_descriptions:
doc_frequency.update(item) # add all the words to the counter table and count the frequency of each words
#print doc_frequency.most_common(10)
print 'Done with collecting the job postings!'
print 'There were', len(job_descriptions), 'jobs successfully found.'
# Obtain our key terms and store them in a dict. These are the key data science skills we are looking for
overall_total_skills=skills_dict(doc_frequency)
final_frame = pd.DataFrame(overall_total_skills.items(), columns = ['Term', 'NumPostings']) # Convert these terms to a
# dataframe
# Change the values to reflect a percentage of the postings
final_frame.NumPostings = (final_frame.NumPostings)*100/len(job_descriptions) # Gives percentage of job postings
# having that term
# Sort the data for plotting purposes
final_frame.sort_values('NumPostings', ascending = False, inplace = True)
print final_frame
today = datetime.date.today()
# Get it ready for a bar plot
final_plot = final_frame.plot(x = 'Term', kind = 'bar', legend = None,
title = 'Percentage of Data Scientist Job Ads with a Key Skill, '+country+', '+str(today))
final_plot.set_ylabel('Percentage Appearing in Job Ads')
fig = final_plot.get_figure() # Have to convert the pandas plot object to a matplotlib object
fig.savefig(country+".pdf")
skills_info_TW104()
"""
Explanation: Now, let's do something slightly more serious:
Remark: the following functions for web crawling were originally written by Dr. Steinweg-Woods (https://jessesw.com/Data-Science-Skills/). His code was not up to date and some issues exist. I have cured some problems.
End of explanation
"""
skills_info(city = 'San Francisco', state = 'CA')
skills_info(city = 'New York', state = 'NY')
"""
Explanation: 31.10.2016
the "urllib2" package seems buggy and will request pages which are out of date. Let's use the "requests" (which uses the "urllib3") package instead.
End of explanation
"""
|
fredhohman/pymks | notebooks/elasticity_3D.ipynb | mit | %matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
import timeit as tm
"""
Explanation: Linear Elasticity in 3D
Introduction
This example provides a demonstration of using PyMKS to compute the linear strain field for a two phase composite material in 3D, and presents a comparison of the computational efficiency of MKS when compared with the finite element method. The example first provides information on the boundary conditions used in MKS. Next, delta microstructures are used to calibrate the first order influence coefficients. The influence coefficients are then used to compute the strain field for a random microstructure. Lastly, the calibrated influence coefficients are scaled up and are used to compute the strain field for a larger microstructure and compared with results computed using finite element analysis.
Elastostatics Equations and Boundary Conditions
A review of the governing field equations for elastostatics can be found in the Linear Elasticity in 2D example. The same equations are used in the example with the exception that the second lame parameter (shear modulus) $\mu$ is defined differently in 3D.
$$ \mu = \frac{E}{2(1+\nu)} $$
In general, generating the calibration data for the MKS requires boundary conditions that are both periodic and displaced, which are quite unusual boundary conditions. The ideal boundary conditions are given by,
$$ u(L, y, z) = u(0, y, z) + L\bar{\varepsilon}_{xx} $$
$$ u(0, L, L) = u(0, 0, L) = u(0, L, 0) = u(0, 0, 0) = 0 $$
$$ u(x, 0, z) = u(x, L, z) $$
$$ u(x, y, 0) = u(x, y, L) $$
End of explanation
"""
n = 9
center = (n - 1) / 2
from pymks.tools import draw_microstructures
from pymks.datasets import make_delta_microstructures
X_delta = make_delta_microstructures(n_phases=2, size=(n, n, n))
draw_microstructures(X_delta[:, center])
"""
Explanation: Modeling with MKS
Calibration Data and Delta Microstructures
The first order MKS influence coefficients are all that is needed to compute a strain field of a random microstructure as long as the ratio between the elastic moduli (also known as the contrast) is less than 1.5. If this condition is met we can expect a mean absolute error of 2% or less when comparing the MKS results with those computed using finite element methods [1].
Because we are using distinct phases and the contrast is low enough to only need the first order coefficients, delta microstructures and their strain fields are all that we need to calibrate the first order influence coefficients [2].
The make_delta_microstructure function from pymks.datasets can be used to create the two delta microstructures needed to calibrate the first order influence coefficients for a two phase microstructure. This function uses the python module SfePy to compute the strain fields using finite element methods.
End of explanation
"""
from pymks.datasets import make_elastic_FE_strain_delta
from pymks.tools import draw_microstructure_strain
# Two-phase material: elastic moduli 80/120, equal Poisson's ratios,
# and a 2% imposed macroscopic strain on an n x n x n grid.
elastic_modulus = (80, 120)
poissons_ratio = (0.3, 0.3)
macro_strain = 0.02
size = (n, n, n)
# Time the finite-element computation of the delta-microstructure strains.
t = tm.time.time()
X_delta, strains_delta = make_elastic_FE_strain_delta(elastic_modulus=elastic_modulus,
                                                      poissons_ratio=poissons_ratio,
                                                      size=size, macro_strain=macro_strain)
print 'Elapsed Time',tm.time.time() - t, 'Seconds'
"""
Explanation: Using delta microstructures for the calibration of the first order influence coefficients is essentially the same as using a unit impulse response to find the kernel of a system in signal processing. Delta microstructures are composed of only two phases. One phase is located only at the center cell of the microstructure, and the rest made up of the other phase.
Generating Calibration Data
The make_elasticFEstrain_delta function from pymks.datasets provides an easy interface to generate delta microstructures and their strain fields, which can then be used for calibration of the influence coefficients. The function calls the ElasticFESimulation class to compute the strain fields with the boundary conditions given above.
In this example, lets look at a two phase microstructure with elastic moduli values of 80 and 120 and Poisson's ratio values of 0.3 and 0.3 respectively. Let's also set the macroscopic imposed strain equal to 0.02. All of these parameters used in the simulation must be passed into the make_elasticFEstrain_delta function.
End of explanation
"""
draw_microstructure_strain(X_delta[0, center, :, :], strains_delta[0, center, :, :])
"""
Explanation: Let's take a look at one of the delta microstructures and the $\varepsilon_{xx}$ strain field.
End of explanation
"""
from pymks import MKSLocalizationModel
from pymks.bases import PrimitiveBasis
prim_basis = PrimitiveBasis(n_states=2)
model = MKSLocalizationModel(basis=prim_basis)
"""
Explanation: Calibrating First Order Influence Coefficients
Now that we have the delta microstructures and their strain fields, we can calibrate the influence coefficients by creating an instance of a bases and the MKSLocalizationModel class. Because we have 2 discrete phases we will create an instance of the PrimitiveBasis with n_states equal to 2, and then pass the basis in to create an instance of the MKSLocalizationModel. The delta microstructures and their strain fields are then passed to the fit method.
End of explanation
"""
model.fit(X_delta, strains_delta)
"""
Explanation: Now, pass the delta microstructures and their strain fields into the fit method to calibrate the first order influence coefficients.
End of explanation
"""
from pymks.tools import draw_coeff
coeff = model.coeff
draw_coeff(coeff[center])
"""
Explanation: That's it, the influence coefficient have be calibrated. Let's take a look at them.
End of explanation
"""
from pymks.datasets import make_elastic_FE_strain_random
np.random.seed(99)
t = tm.time.time()
X, strain = make_elastic_FE_strain_random(n_samples=1, elastic_modulus=elastic_modulus,
poissons_ratio=poissons_ratio, size=size, macro_strain=macro_strain)
print 'Elapsed Time',(tm.time.time() - t), 'Seconds'
draw_microstructure_strain(X[0, center] , strain[0, center])
"""
Explanation: The influence coefficients for $l=0$ have a Gaussian-like shape, while the influence coefficients for $l=1$ are constant-valued. The constant-valued influence coefficients may seem superfluous, but are equally as import. They are equivalent to the constant term in multiple linear regression with categorical variables.
Predict of the Strain Field for a Random Microstructure
Let's now use our instance of the MKSLocalizationModel class with calibrated influence coefficients to compute the strain field for a random two phase microstructure and compare it with the results from a finite element simulation.
The make_elasticFEstrain_random function from pymks.datasets is an easy way to generate a random microstructure and its strain field results from finite element analysis.
End of explanation
"""
t = tm.time.time()
strain_pred = model.predict(X)
print 'Elapsed Time',tm.time.time() - t,'Seconds'
"""
Explanation: Note that the calibrated influence coefficients can only be used to reproduce the simulation with the same boundary conditions that they were calibrated with
Now to get the strain field from the MKSLocalizationModel just pass the same microstructure to the predict method.
End of explanation
"""
from pymks.tools import draw_strains_compare
draw_strains_compare(strain[0, center], strain_pred[0, center])
"""
Explanation: Finally let's compare the results from finite element simulation and the MKS model.
End of explanation
"""
from pymks.tools import draw_differences
draw_differences([strain[0, center] - strain_pred[0, center]], ['Finite Element - MKS'])
"""
Explanation: Let's look at the difference between the two plots.
End of explanation
"""
m = 3 * n
center = (m - 1) / 2
t = tm.time.time()
X = np.random.randint(2, size=(1, m, m, m))
"""
Explanation: The MKS model is able to capture the strain field for the random microstructure after being calibrated with delta microstructures.
Resizing the Coefficeints to use on Larger Microstructures
The influence coefficients that were calibrated on a smaller microstructure can be used to predict the strain field on a larger microstructure though spectral interpolation [3], but accuracy of the MKS model drops slightly. To demonstrate how this is done, let's generate a new larger $m$ by $m$ random microstructure and its strain field.
End of explanation
"""
model.resize_coeff(X[0].shape)
"""
Explanation: The influence coefficients that have already been calibrated need to be resized to match the shape of the new larger microstructure that we want to compute the strain field for. This can be done by passing the shape of the new larger microstructure into the 'resize_coeff' method.
End of explanation
"""
from pymks.tools import draw_strains
t = tm.time.time()
strain_pred = model.predict(X)
print 'Elapsed Time',(tm.time.time() - t), 'Seconds'
draw_microstructure_strain(X[0, center], strain_pred[0, center])
"""
Explanation: Because the coefficients have been resized, they will no longer work for our original $n$ by $n$ sized microstructures they were calibrated on, but they can now be used on the $m$ by $m$ microstructures. Just like before, just pass the microstructure as the argument of the predict method to get the strain field.
End of explanation
"""
|
dato-code/tutorials | dss-2016/lead_scoring/lead_scoring_tutorial.ipynb | apache-2.0 | from __future__ import print_function
import graphlab as gl
"""
Explanation: 1. Introduction
The scenario: suppose we run an online travel agency. We would like to convince our users to book overseas vacations, rather than domestic ones. Each of the users in this dataset will definitely book something at the end of a given trial period, i.e. we are only looking at engaged customers.
Goals:
1. predict which new users are most likely to book an overseas trip,
2. generate segmention rules to group similar users based on features and propensity to convert.
Data: mimics the AirBnB challenge on Kaggle.
- Users
- Website or app sessions.
I've simulated data that's very similar in terms of features and distributions, but I've added timestamps to the sessions, and changed the target from country to a binary domestic vs. international variable.
Sections:
1. Introduction
2. The basic scenario - account data only
3. What's happening under the hood?
4. Incorporating activity data.
End of explanation
"""
users = gl.SFrame('synthetic_airbnb_users.sfr')
users.print_rows(3)
users['status'].sketch_summary()
"""
Explanation: 2. The basic scenario
Import the data: sales accounts
Sales accounts need not be synonymous with users, although that is the case here. At Turi, our sales accounts consist of a mix of individual users, companies, and teams within large companies.
The accounts dataset typically comes from a customer relationship management (CRM) tool, like Salesforce, SAP, or Hubspot. In practice there is an extra step here of extracting the data from that system into an SFrame.
End of explanation
"""
# Encode the booking status as the numeric model target:
# 'international' bookings are successes (+1), 'domestic' bookings are
# failures (-1), and 'new' accounts are still undecided (0).
status_code = {'international': 1,
               'domestic': -1,
               'new': 0}
users['outcome'] = users['status'].apply(lambda x: status_code[x])
# Show the first 10 rows to confirm the status -> outcome mapping.
users[['status', 'outcome']].print_rows(10)
"""
Explanation: Encode the target variable
Three types of accounts.
- Successful accounts, i.e conversions, are coded as 1.
- Failed accounts are coded as -1.
- Open accounts, i.e. accounts that have not been decided, are coded as 0.
Together, successful and failed accounts constitute the training accounts.
End of explanation
"""
user_schema = {'conversion_status': 'outcome',
'account_id': 'id',
'features': ['gender', 'age', 'signup_method', 'signup_app',
'first_device_type', 'first_browser']}
"""
Explanation: Define the schema
In a complex problem like lead scoring, there are potentially many columns with "meaning". To help the lead scoring tool recognize these columns, we define a dictionary that maps standard lead scoring inputs to the columns in our particular dataset.
End of explanation
"""
scorer = gl.lead_scoring.create(users, user_schema)
"""
Explanation: Create the lead scoring tool
All accounts are passed to the tool when it's created. There is no separate predict method.
- We typically want to score the same set of open accounts each day during the trial period.
- Very rarely do we want to predict lead scores for different accounts.
- It makes more sense to keep the open accounts in the model, so we can incrementally update the lead scores and market segments, as new data comes in.
- The update method is not yet implemented :(
End of explanation
"""
print(scorer)
scorer.open_account_scores.head(3)
scorer.open_account_scores.topk('conversion_prob', k=3)
scorer.training_account_scores.head(3)
scorer.segment_descriptions.head(3)
scorer.segment_descriptions[['segment_id', 'segment_features']].print_rows(max_column_width=65)
"""
Explanation: Retrieve the model output and export
There's a lot of stuff in the lead scoring model's summary. Let's focus on the accessible fields, three in particular:
- open_account_scores: conversion probability and market segment for open accounts
- training_account_scores: conversion probability and market segment for existing successes and failures
- segment_descriptions: definitions and summary statistics for the market segments
End of explanation
"""
seg = scorer.training_account_scores.filter_by(8, 'segment_id').head(3)
print(seg)
"""
Explanation: To get the training or open accounts that belong to a particular market segment, use the respective SFrame's filter_by method.
End of explanation
"""
print(scorer.scoring_model)
"""
Explanation: 3. What's happening under the hood?
The scoring model: gradient boosted trees
End of explanation
"""
scorer2 = gl.lead_scoring.create(users, user_schema, max_iterations=20, verbose=False)
print("Original num trees:", scorer.scoring_model.num_trees)
print("New num trees:", scorer2.scoring_model.num_trees)
"""
Explanation: Additional keyword arguments to the lead scoring create function are passed through to the gradient boosted trees model.
End of explanation
"""
print("Validation accuracy:", scorer.scoring_model.validation_accuracy)
"""
Explanation: Validating the scoring model
By default, the gradient boosted trees model withholds a small fraction of the training accounts as a validation set. The validation accuracy can be accessed by the user.
End of explanation
"""
print(scorer.segmentation_model)
"""
Explanation: The segmentation model: decision tree
End of explanation
"""
scorer2 = scorer.resize_segmentation_model(max_segments=20)
print("original number of segments:", scorer.segment_descriptions.num_rows())
print("new number of segments:", scorer2.segment_descriptions.num_rows())
"""
Explanation: Because training the lead scoring tool can take some time with large datasets, the number of segments can be changed after a lead scoring tool has been created. This function creates a new model, the original model is immutable.
End of explanation
"""
sessions = gl.SFrame('synthetic_airbnb_sessions.sfr')
sessions = gl.TimeSeries(sessions, index='timestamp')
sessions.head(5)
"""
Explanation: 4. Incorporating activity data
Account activity data describes interactions between accounts and aspects of your business, like web assets, email campaigns, or products. Conceptually, each interaction involves at a minimum:
- an account
- a timestamp
Interactions may also have:
- an "item"
- a user
- other features
End of explanation
"""
session_schema = {'account_id': 'user_id',
'item': 'action_detail'}
"""
Explanation: As with the accounts table, we need to indicate which columns in the activity table mean what. If we had a column indicating which user was involved, we could specify that as well here. In this scenario, we don't have users that are distinct from accounts.
End of explanation
"""
user_schema.update({'open_date': 'date_account_created',
'decision_date': 'booking_date'})
"""
Explanation: Define relevant dates
To use account activity data, a lead scoring tool needs to know the time window for each account's relevant interactions. There are three key dates for each account.
open date: when a new sales account was created
close date: when the trial period ends for a new sales account
decision date: when a final decision was reached by a training account, either success (conversion) or failure. May be before or after the close date.
The trial duration is the difference between the open date and the close date. The lead scoring tool in GLC assumes this is fixed for all accounts, but in general this need not be the case.
Open accounts do not have a decision date yet, by definition. They may or may not be still within the trial period.
End of explanation
"""
import datetime as dt
# Create a lead scoring tool that also uses the session activity data;
# the trial period is fixed at 30 days for every account.
scorer3 = gl.lead_scoring.create(users, user_schema,
                                 sessions, session_schema,
                                 trial_duration=dt.timedelta(days=30))
print(scorer3)
"""
Explanation: The trial duration is represented by an instance of the datetime package's timedelta class.
Create the lead scoring tool
End of explanation
"""
invalid_ids = scorer3.invalid_accounts
print(invalid_ids)
invalid_accounts = users.filter_by(invalid_ids, 'id')
invalid_accounts[['id', 'date_account_created', 'booking_date']].print_rows(3)
"""
Explanation: Under the hood: date-based data validation
Invalid accounts have a decision date earlier than their open date. This is impossible, and these accounts are simply dropped from the set of training accounts.
End of explanation
"""
print(scorer3.num_implicit_failures)
"""
Explanation: Implicit failure accounts are accounts that are open, but have been open for so long they are extremely unlikely to convert.
The threshold for implicit failure is the 95th percentile of the time it took training accounts to reach a decision, or the trial period duration, whichever is longer.
Implicit failures are included in both the training and open account output, because they are used to train the scoring and segmentation models, but are technically still open.
The user doesn't have to explicitly specify failure accounts - the model can do that automatically.
End of explanation
"""
scorer3.final_features
"""
Explanation: Under the hood: activity-based feature engineering
The lead scoring tool constructs account-level features based on the number of interactions, items, and users (not applicable in this scenario) per day that the accounts are open (up to the maximum of the trial duration). The names of these features are accessible as a model field.
End of explanation
"""
scorer3.open_account_scores.print_rows(3)
"""
Explanation: The values for these features are included in the primary model outputs (training_account_scores and open_account_scores).
End of explanation
"""
cols = ['segment_features', 'median_conversion_prob', 'num_training_accounts']
scorer3.segment_descriptions[cols].print_rows(max_row_width=80, max_column_width=60)
"""
Explanation: The activity-based features are also used to define market segments.
End of explanation
"""
print("Account-only validation accuracy:", scorer.scoring_model.validation_accuracy)
print("Validation accuracy including activity features:", scorer3.scoring_model.validation_accuracy)
"""
Explanation: Results: improved validation accuracy
End of explanation
"""
|
widdowquinn/SI_Holmes_etal_2017 | notebooks/01-data_qa.ipynb | mit | %pylab inline
import os
import random
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
from Bio import SeqIO
import tools
"""
Explanation: <img src="images/JHI_STRAP_Web.png" style="width: 150px; float: right;">
Supplementary Information: Holmes et al. 2020
1. Data cleaning, normalisation and quality assurance
This notebook describes raw data import, cleaning, and QA, then writing out of processed data to the data/ subdirectory, for use in model fitting.
Table of Contents
Microarray data
Import array data
Data QA
Problematic probes
Interpolation for problematic probes
Normalisation
Wide to long form
Probe matches to Sakai and DH10B
Write data
Python imports
End of explanation
"""
# Input array data filepaths
controlarrayfile = os.path.join('..', 'data', 'control_unix_endings_flags.csv') # control experiment array data (preprocessed)
treatmentarrayfile = os.path.join('..', 'data', 'treatment_unix_endings.csv') # treatment experiment array data (preprocessed)
"""
Explanation: Microarray data <a id="microarray_data"></a>
<div class="alert alert-warning">
Raw array data was previously converted to plain text comma-separated variable format from two `Excel` files:
<ul>
<li> The file `AH alldata 12082013.xlsx` was converted to `data/treatment_unix_endings.csv`
<li> The file `AH alldata expt1 flagged 05092013.xlsx` was converted to `data/control_unix_endings_flags.csv`
</ul>
</div>
These describe microarray results for samples that underwent two treatments:
in vitro growth only - i.e. control: data/control_unix_endings_flags.csv
in vitro growth and plant passage - i.e. treatment: data/treatment_unix_endings.csv
End of explanation
"""
# skiprows=4 drops the four preamble rows at the top of each exported
# CSV; index_col=0 makes the probe systematic name the index.
control = pd.read_csv(controlarrayfile, sep=',', skiprows=4, index_col=0)
treatment = pd.read_csv(treatmentarrayfile, sep=',', skiprows=4, index_col=0)
# Uncomment the lines below to inspect the first few rows of each dataframe
#control.head()
#treatment.head()
# Number of probes in the control array data.
len(control)
"""
Explanation: Import array data <a id="import_data"></a>
End of explanation
"""
colnames_in = ['Raw', 'Raw.1', 'Raw.2', 'Raw.3', 'Raw.4', 'Raw.5'] # raw data columns
colnames_out = ['input.1', 'output.1', 'input.2', 'output.2', 'input.3', 'output.3'] # renamed raw data columns
# Reduce control and treatment arrays to raw data columns only
control = control[colnames_in]
control.columns = colnames_out
treatment = treatment[colnames_in]
treatment.columns = colnames_out
"""
Explanation: <div class="alert alert-warning">
We reduce the full dataset to only the raw intensity values. We also rename the columns in each of the `control` and `treatment` dataframes.
</div>
In both control and treatment datasets, the mapping of experimental samples (input and output) across the three replicates is:
replicate 1 input: Raw $\rightarrow$ input.1
replicate 1 output: Raw.1 $\rightarrow$ output.1
replicate 2 input: Raw.2 $\rightarrow$ input.2
replicate 2 output: Raw.3 $\rightarrow$ output.2
replicate 3 input: Raw.4 $\rightarrow$ input.3
replicate 3 output: Raw.5 $\rightarrow$ output.3
End of explanation
"""
# Plot correlations for control data
tools.plot_correlation(control);
"""
Explanation: Data QA <a id="data_qa"></a>
We expect that there is good agreement between input and output raw intensities for each replicate control or treatment experiment. We also expect that there should be good agreement across replicates within the controls, and within the treatment. We inspect this agreement visually with a matrix of scatterplots, below.
The plot_correlation() function can be found in the accompanying tools.py module.
End of explanation
"""
# Plot correlations for treatment data
tools.plot_correlation(treatment);
"""
Explanation: There is good visual correlation between the intensities for the control arrays, and the Spearman's R values also indicate good correlation.
End of explanation
"""
# Select outlying treatment input.3 values
# (only the three problematic probes identified in the correlation
# plots have intensity greater than the 4e4 threshold).
treatment.loc[treatment['input.3'] > 4e4]
# Define problem probes:
problem_probes = list(treatment.loc[treatment['input.3'] > 4e4].index)
"""
Explanation: There is - mostly - good visual correlation between the intensities for the control arrays, and the Spearman's R values also indicate good correlation. There appear to be three problematic probes in replicate 3 that we may need to deal with in the data cleanup.
<div class="alert alert-success">
<b>Taken together, these plots indicate:</b>
<ul>
<li> the intensities of the control arrays are systematically larger than intensities for the treatment arrays, suggesting that the effects of noise may be proportionally greater for the treatment arrays. This might be a concern for reliably inferring enrichment or depletion in the treatment.
<li> the control arrays are good candidates for quantile normalisation (QN; $r > 0.95$, with similar density distributions)
<li> the treatment array `input.3` dataset is potentially problematic, due to three treatment probe datapoints with intensities greater than 40,000 units having large leverage.
</ul>
</div>
Problematic probes <a id="problem_probes"></a>
<div class="alert-warning">
We can readily identify problematic probes in treatment replicate 3, as they are the only probes with intensity greater than 40,000.
The problematic probes are:
<ul>
<li> <code>A_07_P000070</code>
<li> <code>A_07_P061472</code>
<li> <code>A_07_P052489</code>
</ul>
</div>
End of explanation
"""
# Interpolate values
# Overwrite 'input.3' for the problem probes with the row-wise mean of
# the corresponding 'input.1' and 'input.2' intensities (.mean(1)).
# NOTE(review): DataFrame.set_value is deprecated in modern pandas
# (removed in 1.0); .loc-based assignment is the current equivalent.
treatment.set_value(index=problem_probes, col='input.3',
                    value=treatment.loc[problem_probes][['input.1', 'input.2']].mean(1))
# Display the corrected rows for inspection.
treatment.loc[problem_probes]
"""
Explanation: Interpolating values for problem probes <a id="interpolation"></a>
We replace the three clear outlying values for the three problematic probes in input.3 of the treatment array with interpolated values. We assume that input.1 and input.2 are typical of the input intensities for these three probes, and take the average of their values to substitute for input.3 for each.
End of explanation
"""
# Plot correlations for treatment data
tools.plot_correlation(treatment);
"""
Explanation: We can visualise the change in correlation for the treatment dataframe that results:
End of explanation
"""
input_cols = ['input.1', 'input.2', 'input.3'] # input columns
output_cols = ['output.1', 'output.2', 'output.3'] # output columns
# Normalise inputs and outputs for control and treatment separately
control_input = tools.quantile_norm(control, columns=input_cols)
control_output = tools.quantile_norm(control, columns=output_cols)
treatment_input = tools.quantile_norm(treatment, columns=input_cols)
treatment_output = tools.quantile_norm(treatment, columns=output_cols)
"""
Explanation: Normalisation <a id="normalisation"></a>
We expect the array intensity distribution to vary according to whether the sample was from the input (strong) or output (weak) set, and whether the sample came from the control or treatment pools. We therefore divide the dataset into four independently-normalised components:
control_input
control_output
treatment_input
treatment_output
<br /><div class="alert-success">
We have established that because the input and output arrays in both control and treatment conditions have strong correlation across all intensities, and have similar intensity distributions, we are justified in using quantile (mean) normalisation.
</div>
End of explanation
"""
# Make violinplots of normalised data
tools.plot_normalised(control_input, control_output,
treatment_input, treatment_output)
"""
Explanation: We visualise the resulting distributions, in violin plots:
End of explanation
"""
# Convert data from wide to long form
data = tools.wide_to_long(control_input, control_output,
treatment_input, treatment_output)
data.head()
"""
Explanation: <div class="alert-success">
These plots illustrate that there is relative reduction in measured array intensity between control and treatment arrays for both the input and output arrays.
</div>
Wide to long form <a id="wide_to_long"></a>
We have four dataframes containing normalised data:
control_input
control_output
treatment_input
treatment_output
Each dataframe is indexed by the array probe systematic name, with three columns that correspond to replicates 1, 2, and 3 for either a control or a treatment run. For downstream analysis we want to organise this data as the following columns:
index: unique ID
probe: probe name (these apply across treatment/control and input/output)
input: normalised input intensity value (for a particular probe and replicate)
output: normalised input intensity value (for a particular probe and replicate)
treatment: 0/1 indicating whether the measurement was made for the control or treatment sample
replicate: 1, 2, 3 indicating which replicate the measurement was made from
<br /><div class="alert-warning">
We will add other columns with relevant data later, and to enable this, we convert the control and treatment data frames from wide (e.g. input.1, input.2, input.3 columns) to long (e.g. probe, input, output, replicate) form - once for the control data, and once for the treatment data. We match on a multi-index of probe and replicate.
</div>
End of explanation
"""
# Visualise input v output distributions
tools.plot_input_output_violin(data)
"""
Explanation: Long form data has some advantages for melting into new arrangments for visualisation, analysis, and incorporation of new data. For instance, we can visualise the distributions of input and output log intensities against each other, as below:
End of explanation
"""
# BLASTN results files
sakai_blastfile = os.path.join('..', 'data', 'probes_blastn_sakai.tab')
dh10b_blastfile = os.path.join('..', 'data', 'probes_blastn_dh10b.tab')
# Obtain a dataframe of unique probes and their BLASTN matches
unique_probe_hits = tools.unique_probe_matches((sakai_blastfile, dh10b_blastfile))
"""
Explanation: <div class="alert-success">
This visualisation again shows that treatment intensities are generally lower than control intensities, but also suggests that the bulk of output intensities are lower than input intensities.
<br /><br />
There is a population of low-intensity values for each set of arrays, however. These appear to have a slight increase in intensity in the output, compared to input arrays.
</div>
Probe matches to Sakai and DH10B <a id="probe_matches"></a>
<div class="alert-warning">
Evidence for potential hybridisation of probes to DH10B or Sakai isolates was determined by default `BLASTN` query of each probe sequence against chromosome and plasmid feature nucleotide sequences from the NCBI records:
<ul>
<li> `GCF_000019425.1_ASM1942v1_cds_from_genomic.fna`
<li> `GCF_000008865.1_ASM886v1_cds_from_genomic.fna`
</ul>
</div>
$ blastn -query Array/probe_seqlist.fas -subject Sakai/GCF_000008865.1_ASM886v1_cds_from_genomic.fna -outfmt 6 -out probes_blastn_sakai.tab -perc_identity 100
$ blastn -query Array/probe_seqlist.fas -subject DH10B/GCF_000019425.1_ASM1942v1_cds_from_genomic.fna -outfmt 6 -out probes_blastn_dh10b.tab -perc_identity 100
We first identify the probes that match uniquely at 100% identity to a single E. coli gene product from either Sakai or DH10B
End of explanation
"""
# Sequence data files
sakai_seqfile = os.path.join('..', 'data', 'Sakai', 'GCF_000008865.1_ASM886v1_cds_from_genomic.fna')
dh10b_seqfile = os.path.join('..', 'data', 'DH10B', 'GCF_000019425.1_ASM1942v1_cds_from_genomic.fna')
# Add locus tag information to each unique probe
unique_probe_hits = tools.annotate_seqdata(unique_probe_hits, (sakai_seqfile, dh10b_seqfile))
"""
Explanation: We then add parent gene annotations to the unique probes:
End of explanation
"""
censored_data = pd.merge(data, unique_probe_hits[['probe', 'match', 'locus_tag']],
how='inner', on='probe')
censored_data.head()
"""
Explanation: <div class="alert-danger">
We will certainly be interested in probes that hybridise unambiguously to Sakai or to DH10B. The [array was however designed to report on several *E. coli* isolates](http://www.ebi.ac.uk/arrayexpress/arrays/A-GEOD-13359/?ref=E-GEOD-46455), and not all probes should be expected to hybridise, so we could consider the non-uniquely matching probes not to be of interest, and censor them.
<br /><br />
A strong reason to censor probes is that we will be estimating locus tag/gene-level treatment effects, on the basis of probe-level intensity measurements. Probes that may be reporting on multiple genes may mislead our model fit, and so are better excluded.
</div>
We exclude non-unique matching probes by performing an inner join between the data and unique_probe_hits dataframes.
End of explanation
"""
# Visually inspect the effect of censoring on distribution
tools.plot_input_output_violin(censored_data)
"""
Explanation: <div class="alert-success">
This leaves us with a dataset comprising:
<ul>
<li> 49872 datapoints (rows)
<li> 8312 unique probes
<li> 6084 unique locus tags
</ul>
</div>
As can be seen in the violin plot below, censoring the data in this way removes a large number of low-intensity probes from all datasets.
End of explanation
"""
# Create output directory
outdir = 'datasets'
os.makedirs(outdir, exist_ok=True)
# Output files
full_dataset = os.path.join(outdir, "normalised_array_data.tab") # all censored data
reduced_probe_dataset = os.path.join(outdir, "reduced_probe_data.tab") # subset of data grouped by probe
reduced_locus_dataset = os.path.join(outdir, "reduced_locus_data.tab") # subset of data grouped by locus tag
"""
Explanation: Write data <a id="write"></a>
<div class="alert-warning">
<b>We write the censored, normalised, long-format data to the `datasets/` subdirectory.</b>
</div>
End of explanation
"""
# Index on probes
indexed_data = tools.index_column(censored_data, 'probe')
# Index on locus tags
indexed_data = tools.index_column(indexed_data, 'locus_tag')
# Index on array (replicate X treatment)
indexed_data = tools.index_column(indexed_data, 'repXtrt')
# Uncomment the line below to inspect the data
#indexed_data.head(20)
# Write the full dataset to file
indexed_data.to_csv(full_dataset, sep="\t", index=False)
"""
Explanation: For modelling with Stan, we assign indexes for common probe ID, locus tag, and array (combination of replicate and treatment) to each probe, before writing out the complete dataset.
End of explanation
"""
# Reduced probe set
reduced_probes = tools.reduce_dataset(indexed_data, 'probe')
reduced_probes.to_csv(reduced_probe_dataset, sep="\t", index=False)
# Reduced locus tag set
reduced_lts = tools.reduce_dataset(indexed_data, 'locus_tag')
reduced_lts.to_csv(reduced_locus_dataset, sep="\t", index=False)
"""
Explanation: For testing, we want to create two data subsets, one containing a reduced number of probes, and one with a reduced number of genes/locus tags.
End of explanation
"""
|
nholtz/structural-analysis | Devel/V05/Testing-Stuff.ipynb | cc0-1.0 | import hashlib
import inspect
import types
types.ClassType
class Bar:
pass
class Foo(Bar):
def __getitem__(s):
pass
type(Foo) is types.ClassType
inspect.getmembers(Foo)
def fdigest(filename):
    """Return the SHA-256 hex digest of the contents of *filename*.

    The file is read in binary mode inside a context manager, so the
    handle is closed even if reading raises (the original left the file
    open on error).
    """
    with open(filename, mode='rb') as f:
        return hashlib.sha256(f.read()).hexdigest()
h = fdigest('Loads.ipynb')
h
def extend(old):
    """This is used as a class decorator to 'extend' class definitions,
    for example, over widely dispersed areas. EG:

    class Foo(object):
        . . .
    @extend(Foo)
    class Foo:
        def meth2(...):
            . . .

    will result in one class Foo containing all methods, etc.

    NOTE: Python 2 only -- relies on old-style classes
    (types.ClassType) and on the im_self/im_func/func_code attributes
    that were removed in Python 3."""
    def _extend(new,old=old):
        # The extension class must share the name of the class it
        # extends, be an old-style class, and have no base classes --
        # these checks catch the common mistakes early.
        if new.__name__ != old.__name__:
            raise TypeError("Class names must match: '{}' != '{}'".format(new.__name__,old.__name__))
        if type(new) is not types.ClassType:
            raise TypeError("Extension class must be an old-style class (not derived from class object)")
        if len(new.__bases__) != 0:
            raise TypeError("Extension class must not have a base class.")
        ng = ['__doc__','__module__']   # attributes NOT copied across
        for a,v in inspect.getmembers(new):
            if a not in ng:
                if type(v) is types.MethodType:
                    # Unbound methods are re-bound to the old class;
                    # methods already bound to the class (im_self set)
                    # came from classmethods and are re-wrapped.
                    if v.im_self is None:
                        v = types.MethodType(v.im_func,None,old)
                    else:
                        v = classmethod(v.im_func)
                elif type(v) is property:
                    # Rebuild the property from its accessor functions.
                    v = property(v.fget,v.fset,v.fdel)
                elif type(v) is types.FunctionType:
                    # Plain function objects are recreated and wrapped
                    # as staticmethods on the target class.
                    v = staticmethod(types.FunctionType(v.func_code,v.func_globals,v.func_name,v.func_defaults,v.func_closure))
                setattr(old,a,v)
                #print('Set {} in class {} to type {}'.format(a,old.__name__,type(v)))
        return old
    return _extend
class Foo(object):
def im(s):
return s
@classmethod
def cm(s):
return s
l = inspect.getmembers(Foo)
l
def _cm2(c):
return 'cm2',c
v = l[-2][1].im_self
Foo.cm2 = types.MethodType(_cm2,v,Foo)
type(v),v,l[-2][1].im_class
l = inspect.getmembers(Foo)
l
def _cm3(c):
return '_cm3',c
Foo.cm3 = classmethod(_cm3)
l = inspect.getmembers(Foo)
l
v = l[-2][1]
v.im_self, type(v)
Foo.cm(), Foo.cm2()
class Bar(Foo):
pass
Bar.cm(), Bar.cm2()
Bar.cm3()
from salib import extend
class Zip(object):
def im(c):
return 'Zip.im',c
@classmethod
def cm(c):
return 'Zip.cm',c
@extend(Zip)
class Zip:
def im2(c):
return 'Zip.im2',c
@classmethod
def cm2(c):
return 'Zip.cm2',c
z = Zip()
z.im(), z.cm(), z.im2(), z.cm2()
class Zap(Zip):
def im3(c):
return 'Zap.im3',c
@classmethod
def cm3(c):
return 'Zap.cm3',c
zz = Zap()
zz.im(), zz.cm(), zz.im2(), zz.cm2(), zz.im3(), zz.cm3()
"""
Explanation: Literate Programming - a good explanation.
End of explanation
"""
class Zoop:
pass
Zoop.__module__
import sys
m = sys.modules['__main__']
m
getattr(m,'Zoop')
c = Zoop
m.__dict__[c.__name__]
"""
Explanation: No args to @extend?
The following trick might be usable to extract the old class when it's not
specifically given to @extend.
End of explanation
"""
from IPython.core.magic import register_cell_magic
# Register a demonstration '%%test' cell magic with IPython.
@register_cell_magic('test')
def _test(line,cell):
    """Echo the text on the magic line and the cell body, then return
    13 so the value shows up as the cell's output."""
    print('Line:',line)
    print('Cell:',cell)
    return 13
%%test foo be doo
a + b
c
%%time
n = 1000000
sum(range(n))
%lsmagic
cc = compile('''a=3;
b=4;
c=a*b;
c*10''','here','exec')
eval(cc)
cc
cc.co_code
dir(cc)
cc.co_filename
"""
Explanation: 'test' magic
End of explanation
"""
|
basnijholt/orbitalfield | Paper-figures.ipynb | bsd-2-clause | import numpy as np
import holoviews as hv
import holoviews_rc
import kwant
from fun import *
import os
def ticks(plot, x=True, y=True):
    """Apply the standard tick layout used for the paper figures.

    When ``x`` (or ``y``) is False the tick labels on that axis are
    blanked and the corresponding axis label is removed via the
    ``hide_x`` / ``hide_y`` final hooks; the tick *positions* are the
    same either way. ``tick_marks`` is always applied.
    """
    x_positions = [0, 1, 2]
    y_positions = [0, 17, 35]
    final_hooks = [tick_marks]
    if not x:
        x_positions = [(pos, '') for pos in x_positions]
        final_hooks.append(hide_x)
    if not y:
        y_positions = [(pos, '') for pos in y_positions]
        final_hooks.append(hide_y)
    options = {'Image': {'xticks': x_positions, 'yticks': y_positions},
               'Overlay': {'final_hooks': final_hooks}}
    return plot(plot=options)
def tick_marks(plot, element):
    """Final-hook: draw short black major tick marks on the rendered axes.

    Receives the rendering plot object and the holoviews element (the
    element is unused); only the matplotlib axis is needed. The unused
    local ``fig = plot.state`` from the original has been removed.
    """
    ax = plot.handles['axis']
    ax.tick_params(which='major', color='k', size=3)
def hide_x(plot, element):
    """Final-hook: blank out the x-axis label on the rendered plot."""
    plot.handles['axis'].set_xlabel('')
def hide_y(plot, element):
    """Final-hook: blank out the y-axis label on the rendered plot."""
    plot.handles['axis'].set_ylabel('')
hv.notebook_extension()
%output size=100 dpi=250 css={'width': '3.4in'}
renderer = hv.Store.renderers['matplotlib'].instance(fig='pdf', size=100, dpi=250)
from holoviews.plotting.mpl import MPLPlot
MPLPlot.fig_inches = (3.4, None)
"""
Explanation: Download data at: https://data.4tu.nl/repository/uuid:20f1c784-1143-4c61-a03d-7a3454914abb
Run all cells to generate the figures used in the paper.
End of explanation
"""
import matplotlib.cm
import matplotlib.colors as mcolors
# Build a custom colormap: 128 samples from the upper half of the
# reversed 'binary' map are stacked on top of 127 samples from the
# reversed 'gist_heat' map (restricted to [0, 0.8]) to give a single
# 255-entry map used for the band-gap phase diagrams.
colors1 = matplotlib.cm.binary_r(np.linspace(0.5, 1, 128))
colors2 = matplotlib.cm.gist_heat_r(np.linspace(0, 0.8, 127))
colors = np.vstack((colors1, colors2))
mymap = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors)
sc_on_side_alpha100 = create_holoviews('data/0_to_2T_1x1_angles_sc_on_side_mu_ranging_from_minus_2_to_plus_2_full_phase_diagram_with_correction_A_alpha100.h5')
sc_on_side_no_orb_alpha100 = create_holoviews('data/0_to_2T_1x1_angles_sc_on_side_mu_ranging_from_minus_2_to_plus_2_full_phase_diagram_with_correction_A_no_orbital_alpha100.h5')
sc_on_side = create_holoviews('data/0_to_2T_1x1_angles_sc_on_side_mu_ranging_from_minus_2_to_plus_2_full_phase_diagram_with_correction_A.h5')
sc_on_side_no_orb = create_holoviews('data/0_to_2T_1x1_angles_sc_on_side_mu_ranging_from_minus_2_to_plus_2_full_phase_diagram_with_correction_A_no_orbital.h5')
"""
Explanation: Load data and create a custom cmap
End of explanation
"""
%%opts Layout [vspace=0] Image (cmap=mymap clims=(-197, 197))
%%opts Layout [sublabel_position=(-0.4, 0.9) sublabel_format='({alpha})' sublabel_size=13]
%%opts Path (color='g')
im1 = sc_on_side_no_orb.Phase_diagram.Band_gap[0.5, 0]
im2 = sc_on_side.Phase_diagram.Band_gap[0.5, 0]
im1 = im1.relabel(r"$\bm{B} \parallel x, \; \bm{A} = 0$", depth=1)
im2 = im2.relabel(r"$\bm{B} \parallel x, \; \bm{A} \ne 0$", depth=1)
max1 = np.nanmax(im1.Im.Band_gap.data)
max2 = np.nanmax(im2.Im.Band_gap.data)
max_gap = np.max((max1, max2))
sc_on_side_hist = (ticks(im1, x=False).hist(bin_range=(0, max_gap)) +
ticks(im2).hist(bin_range=(0, max_gap)))
sc_on_side_hist.cols(1)
# print the maximum band gaps
print("""The maximum band gap of the top plot is {:.4} meV.
The maximum band gap of the lower plot is {:.4} meV.""".format(max1, max2))
%%opts Layout [vspace=0] Image (cmap=mymap clims=(-197, 197))
%%opts Layout [sublabel_position=(-0.4, 0.9) sublabel_format='({alpha})' sublabel_size=13]
%%opts Path (color='g')
im1_alpha100 = sc_on_side_no_orb_alpha100.Phase_diagram.Band_gap[0.5, 0]
im2_alpha100 = sc_on_side_alpha100.Phase_diagram.Band_gap[0.5, 0]
im1_alpha100 = im1_alpha100.relabel(r"$\bm{B} \parallel x, \; \bm{A} = 0$", depth=1)
im2_alpha100 = im2_alpha100.relabel(r"$\bm{B} \parallel x, \; \bm{A} \ne 0$", depth=1)
max1_alpha100 = np.nanmax(im1_alpha100.Im.Band_gap.data)
max2_alpha100 = np.nanmax(im2_alpha100.Im.Band_gap.data)
max_gap_alpha100 = np.max((max1_alpha100, max2_alpha100))
sc_on_side_hist_alpha100 = (ticks(im1_alpha100, x=False).hist(bin_range=(0, max_gap_alpha100)) +
ticks(im2_alpha100).hist(bin_range=(0, max_gap_alpha100)))
(sc_on_side_hist_alpha100).cols(1)
# renderer.save(sc_on_side_hist, 'paper/figures/sc_on_side_hist', fmt='pdf')
# print the maximum band gaps
print("""The maximum band gap of the top plot is {:.4} meV.
The maximum band gap of the lower plot is {:.4} meV.""".format(max1_alpha100, max2_alpha100))
"""
Explanation: Full phase diagram for superconductor on side of wire
Band gaps
End of explanation
"""
%%opts Layout [vspace=0] Image (clims=(0, 1.5))
%%opts Layout [sublabel_position=(-0.4, 0.9) sublabel_format='({alpha})' sublabel_size=13]
%%opts Path (color='g')
im1 = sc_on_side_no_orb.Phase_diagram.Inverse_decay_length[0.5, 0]
im2 = sc_on_side.Phase_diagram.Inverse_decay_length[0.5, 0]
im1 = im1.relabel(r"$\bm{B} \parallel x, \; \bm{A} = 0$", depth=1)
im2 = im2.relabel(r"$\bm{B} \parallel x, \; \bm{A} \ne 0$", depth=1)
dat1 = im1.Im.Inverse_decay_length.data
dat2 = im2.Im.Inverse_decay_length.data
dat1[dat1<0] = np.nan
dat2[dat2<0] = np.nan
sc_on_side_length = (ticks(im1, x=False).hist(bin_range=(0, 1)) +
ticks(im2).hist(bin_range=(0, 1)))
sc_on_side_length.cols(1)
%%opts Layout [vspace=0] Image (clims=(0, 1.5))
%%opts Layout [sublabel_position=(-0.4, 0.9) sublabel_format='({alpha})' sublabel_size=13]
%%opts Path (color='g')
im1_alpha100 = sc_on_side_no_orb_alpha100.Phase_diagram.Inverse_decay_length[0.5, 0]
im2_alpha100 = sc_on_side_alpha100.Phase_diagram.Inverse_decay_length[0.5, 0]
im1_alpha100 = im1_alpha100.relabel(r"$\bm{B} \parallel x, \; \bm{A} = 0$", depth=1)
im2_alpha100 = im2_alpha100.relabel(r"$\bm{B} \parallel x, \; \bm{A} \ne 0$", depth=1)
dat1_alpha100 = im1_alpha100.Im.Inverse_decay_length.data
dat2_alpha100 = im2_alpha100.Im.Inverse_decay_length.data
dat1_alpha100[dat1_alpha100<0] = np.nan
dat2_alpha100[dat2_alpha100<0] = np.nan
sc_on_side_length = (ticks(im1_alpha100, x=False).hist(bin_range=(0, 1)) +
ticks(im2_alpha100).hist(bin_range=(0, 1)))
sc_on_side_length.cols(1)
# renderer.save(sc_on_side_length, 'paper/figures/sc_on_side_length', fmt='pdf')
# print the minimum decay lengths in nm
print("""The minimum decay length of the top plot is {:.3} nm.
The minimum decay length of the lower plot is {:.3} nm.""".format(1000 / np.nanmax(dat1), 1000 / np.nanmax(dat2)))
# print the mode of the decay lengths
frequencies, edges = np.histogram(dat1[dat1>0].reshape(-1), bins=400)
max_mode1 = edges[np.argmax(frequencies)]
frequencies, edges = np.histogram(dat2[dat2>0].reshape(-1), bins=400)
max_mode2 = edges[np.argmax(frequencies)]
print("""The maximum mode of the top plot is {:.2} µm^-1.
The maximum mode of the lower plot is {:.2} µm^-1.
The ratio is {:.3}""".format(max_mode1, max_mode2, max_mode1 / max_mode2))
"""
Explanation: Inverse decay length
End of explanation
"""
p = make_params(mu=4.8, orbital=True, V=lambda x,y,z: 2/50 * z, t_interface=7*constants.t/8,
Delta=5, alpha=50, A_correction=False)
momenta = np.linspace(-0.6, 0.6, 200)
def bands(B):
    """Return an ``hv.Path`` of the band structure at magnetic field *B*.

    *B* is an ``(B_x, B_y, B_z)`` tuple.  NOTE: the module-level
    parameter set ``p`` is mutated in place, and the dispersion of the
    module-level ``lead`` is evaluated at every momentum in ``momenta``.
    """
    p.B_x, p.B_y, p.B_z = B
    dispersion = kwant.physics.Bands(lead, args=[p])
    energies = np.array([dispersion(k=k) for k in momenta])
    return hv.Path((momenta, energies), kdims=[r'$k$', r'$E$'])
E = (-1.5, 1.5)
k = (-0.65, 0.65)
lead = make_3d_wire_external_sc(a=constants.a, angle=0)
x1 = bands((0.5, 0, 0)).select(E=E, k=k)
y1 = bands((0, 0.5, 0)).select(E=E, k=k)
z1 = bands((0, 0, 0.5)).select(E=E, k=k)
lead = make_3d_wire_external_sc(a=constants.a)
x2 = bands((0.5, 0, 0)).select(E=E, k=k)
y2 = bands((0, 0.5, 0)).select(E=E, k=k)
z2 = bands((0, 0, 0.5)).select(E=E, k=k)
%%output fig='svg'
%%opts Layout [vspace=0.1 hspace=0.1 sublabel_format='']
%%opts Path (color='k')
def labels(plot, x=False, y=False, label=''):
    """Overlay a thin dashed zero line on *plot* and relabel it.

    The axis label on x (resp. y) is hidden unless ``x`` (resp. ``y``)
    is True; tick marks are always restyled via the ``tick_marks`` hook
    and tick labels are removed entirely.
    """
    final_hooks = [tick_marks]
    if not x:
        final_hooks.append(hide_x)
    if not y:
        final_hooks.append(hide_y)
    zero_line_style = dict(lw=0.5, color='k', ls=(1, (3.0, 3.0)))
    plot = plot * hv.HLine(0)(style=zero_line_style)
    plot_opts = {'Path': {'xticks': 0, 'yticks': 0},
                 'Overlay': {'final_hooks': final_hooks}}
    return plot.relabel(label)(plot=plot_opts)
opts = {'x': -0.62, 'y': 1.40, 'fontsize': 10, 'valign':'top', 'halign':'left'}
def rectangle(x=opts['x'], y=opts['y']-0.38, width=0.55, height=0.47):
    """Return a light-grey ``hv.Polygons`` rectangle anchored at (x, y)."""
    corners = np.array([(x, y),
                        (x + width, y),
                        (x + width, y + height),
                        (x, y + height)])
    return hv.Polygons([corners])(style={'facecolor': '#F0F0F0'})
box2 = rectangle(width=0.55)
box3 = rectangle(width=0.80)
x1_txt = hv.Text(text="$\mathcal{P}$, $\mathcal{R}_x$, $\mathcal{C}'$", **opts) * box3
y1_txt = hv.Text(text="$\mathcal{P}$", **opts)
z1_txt = hv.Text(text="$\mathcal{P}$, $\mathcal{C}'$", **opts) * box2
x2_txt = hv.Text(text="$\mathcal{P}$, $\mathcal{R}_x$", **opts) * box2
y2_txt = hv.Text(text="$\mathcal{P}$", **opts)
z2_txt = hv.Text(text="$\mathcal{P}$", **opts)
gap_line = lambda x: hv.HLine(np.abs(np.array(x.data)[:, :, 1]).min())(style=dict(lw='0.5', c='r', ls=(1., (3., 3.))))
bands_layout = (labels(x1 * x1_txt * gap_line(x1), label=r"$\bm{B}\parallel \hat{x}$", y=True)+
labels((y1 * y1_txt),label=r"$\bm{B}\parallel \hat{y}$") +
labels((z1 * z1_txt * gap_line(z1)), label=r"$\bm{B}\parallel \hat{z}$") +
labels((x2 * x2_txt * gap_line(x2)), x=True, y=True) +
labels((y2 * y2_txt), x=True) +
labels((z2 * z2_txt), x=True)).cols(3)
bands_layout
# renderer.save(bands_layout, 'paper/figures/bandstructure_annotated', fmt='pdf')
"""
Explanation: Band structures
End of explanation
"""
orb = create_holoviews('data/0_to_2T_4x4_angles_misaligned_with_electric_field.h5')
no_orb = create_holoviews('data/0_to_2T_4x4_angles_misaligned_no_orbital_with_electric_field.h5')
%%opts Path (color='g')
%%opts Image.d [colorbar=True cbar_ticks=np.linspace(0, 140, 5).tolist()]
%%opts Layout [vspace=0.20 hspace=0.15 sublabel_position=(-0.07, 0.79) sublabel_size=10 sublabel_format='({alpha})']
%%opts VLine (linewidth=0.5 color='k')
test = orb.Phase_diagram.Band_gap[0, 0.5]
comparing_phase_diagrams = (
ticks((no_orb.Phase_diagram.Band_gap * hv.VLine(1)).relabel(r"$\bm{B} \parallel \hat{x}, \; \bm{A} = 0$")[0.5, 0], x=False)
+ ticks(no_orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel \hat{z}, \; \bm{A} = 0$")[0, 0.5], x=False, y=False)
+ ticks(orb.Phase_diagram.Band_gap.relabel(r"$\bm{B} \parallel \hat{x}, \; \bm{A} \ne 0$")[0.5, 0])
+ ticks(orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel \hat{z}, \; \bm{A} \ne 0$", group='d', depth=2)[0, 0.5], y=False)).cols(2)
comparing_phase_diagrams
# renderer.save(comparing_phase_diagrams, 'paper/figures/comparing_phase_diagrams', fmt='pdf')
"""
Explanation: Comparing phase diagrams
End of explanation
"""
%%opts Path (color='g')
%%opts Image.d [colorbar=True cbar_ticks=np.linspace(0, 120, 5).tolist()]
%%opts Layout [vspace=0.20 hspace=0.15 sublabel_position=(-0.07, 0.79) sublabel_size=10 sublabel_format='({alpha})']
kys = no_orb.Phase_diagram.Band_gap.keys()
test = orb.Phase_diagram.Band_gap[nearest(kys, 0.05), 0.5]
misaligned = (
ticks(no_orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel (10, 1, 0)^T, \; \bm{A} = 0$")[0.5, nearest(kys, 0.05)], x=False)
+ ticks(no_orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel (0, 1, 10)^T, \; \bm{A} = 0$")[nearest(kys, 0.05), 0.5], x=False, y=False)
+ ticks(orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel (10, 1, 0)^T, \; \bm{A} \ne 0$")[0.5, nearest(kys, 0.05)])
+ ticks(orb.Phase_diagram.Band_gap.relabel(label=r"$\bm{B} \parallel (0, 1, 10)^T, \; \bm{A} \ne 0$", group='d', depth=2)[nearest(kys, 0.05), 0.5], y=False)).cols(2)
misaligned
# renderer.save(misaligned, 'paper/figures/misaligned', fmt='pdf')
"""
Explanation: Comparing phase diagrams, misaligned fields
End of explanation
"""
# import os
# from scripts.hpc05 import HPC05Client
# os.environ['SSH_AUTH_SOCK'] = os.path.join(os.path.expanduser('~'), 'ssh-agent.socket')
# cluster = HPC05Client()
# v = cluster[:]
# v.use_dill()
# lview = cluster.load_balanced_view()
# len(v)
# %%px
# import sys
# import os
# sys.path.append(os.path.join(os.path.expanduser('~'), 'orbitalfield'))
# import kwant
# import numpy as np
# from fun import *
# lead = make_3d_wire()
# p = make_params(orbital=False, B_x=1)
lead = make_3d_wire()
p = make_params(orbital=False, B_x=1)
mus = np.linspace(0, 35, 2000)
if os.path.exists('data/gaps_plot.npy'):
gaps = np.load('data/gaps_plot.npy')
else:
print('Start cluster with the cells above.')
gaps = lview.map_async(lambda mu: find_gap(lead, p, ((1, 0, 0), mu, True), tol=1e-4), mus).result()
np.save('data/gaps_plot', gaps)
if os.path.exists('data/spectrum_ev_plot.npy'):
Es = np.load('data/spectrum_ev_plot.npy')
else:
Es = np.array([kwant.physics.Bands(lead, args=[p])(k=0) for p.mu in mus])
np.save('data/spectrum_ev_plot', Es)
%%output fig='svg'
%%opts VLine (lw=0.5) HLine (lw=0.5, color='g')
%%opts Layout [vspace=.35 aspect_weight=1 sublabel_position=(-0.3, 0.9) sublabel_format='({alpha})' sublabel_size=13]
%%opts Overlay [yticks=3 aspect=1.5 vspace=0.]
E_dim = hv.Dimension(('E_k0', r'$E(k=0)$'), unit='meV')
spectrum = hv.Path((mus, Es), kdims=[dimensions.mu, E_dim])
ind_E = 100
idx = np.argsort(np.min(np.abs(Es), axis=1))
VPoints = hv.Points([(mus[ind_E], E) for E in Es[ind_E]])
p.mu = 0
phase_bounds = np.sort(find_phase_bounds(lead, p, (1, 0, 0), num_bands=40).real)[::2]
HPoints = hv.Points([(x, 0) for x in phase_bounds if x > 0])(style={'color': 'g'})
ev_plot = (spectrum * hv.VLine(mus[ind_E]) * VPoints * HPoints * hv.HLine(0))[:35, -10:10]
bool_array = np.array(np.digitize(mus, phase_bounds)%2, dtype=bool)
gaps_plot = (spectrum
* hv.Area((mus, np.array(gaps) * bool_array))(style={'facecolor': '#FF6700'})
* hv.Area((mus, np.array(gaps) * ~bool_array))(style={'facecolor': '#a9a9a9'})
* hv.HLine(0) * HPoints)
gaps_plot = gaps_plot.map(lambda x: x.clone(extents=(0, 0, 35, 0.2)), [hv.Element])
ev_problem = (ev_plot[:, -8:8](plot={'xticks':[(0, ''), (8, ''), (16, ''), (24, ''), (32, '')],
'final_hooks': [tick_marks, hide_x]}) +
gaps_plot(plot={'xticks': 5, 'final_hooks': [tick_marks]})).cols(1)
ev_problem
# renderer.save(ev_problem, 'paper/figures/ev_problem', fmt='pdf')
"""
Explanation: Eigenvalue problem graphic
Uncomment the lower cells and start an ipcluster to calculate the spectrum.
End of explanation
"""
|
metpy/MetPy | v0.9/_downloads/d5ee7fed8071553be26c422a7518141c/isentropic_example.ipynb | bsd-3-clause | import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, add_timestamp
from metpy.units import units
"""
Explanation: Isentropic Analysis
The MetPy function mpcalc.isentropic_interpolation allows for isentropic analysis from model
analysis data in isobaric coordinates.
End of explanation
"""
data = xr.open_dataset(get_test_data('narr_example.nc', False))
print(list(data.variables))
"""
Explanation: Getting the data
In this example, NARR reanalysis data for 18 UTC 04 April 1987 from the National Centers
for Environmental Information (https://www.ncdc.noaa.gov/data-access/model-data)
will be used.
End of explanation
"""
# Assign data to variable names
lat = data['lat']
lon = data['lon']
lev = data['isobaric']
times = data['time']
tmp = data['Temperature'][0]
uwnd = data['u_wind'][0]
vwnd = data['v_wind'][0]
spech = data['Specific_humidity'][0]
# pint doesn't understand gpm
data['Geopotential_height'].attrs['units'] = 'meter'
hgt = data['Geopotential_height'][0]
"""
Explanation: We will reduce the dimensionality of the data as it is pulled in to remove an empty time
dimension.
End of explanation
"""
isentlevs = [296.] * units.kelvin
"""
Explanation: To properly interpolate to isentropic coordinates, the function must know the desired output
isentropic levels. An array with these levels will be created below.
End of explanation
"""
isent_anal = mpcalc.isentropic_interpolation(isentlevs,
lev,
tmp,
spech,
uwnd,
vwnd,
hgt,
tmpk_out=True)
"""
Explanation: Conversion to Isentropic Coordinates
Once three dimensional data in isobaric coordinates has been pulled and the desired
isentropic levels created, the conversion to isentropic coordinates can begin. Data will be
passed to the function as below. The function requires that isentropic levels, isobaric
levels, and temperature be input. Any additional inputs (in this case relative humidity, u,
and v wind components) will be linearly interpolated to isentropic space.
End of explanation
"""
isentprs, isenttmp, isentspech, isentu, isentv, isenthgt = isent_anal
isentu.ito('kt')
isentv.ito('kt')
"""
Explanation: The output is a list, so now we will separate the variables to different names before
plotting.
End of explanation
"""
print(isentprs.shape)
print(isentspech.shape)
print(isentu.shape)
print(isentv.shape)
print(isenttmp.shape)
print(isenthgt.shape)
"""
Explanation: A quick look at the shape of these variables will show that the data is now in isentropic
coordinates, with the number of vertical levels as specified above.
End of explanation
"""
isentrh = 100 * mpcalc.relative_humidity_from_specific_humidity(isentspech, isenttmp, isentprs)
"""
Explanation: Converting to Relative Humidity
The NARR only gives specific humidity on isobaric vertical levels, so relative humidity will
have to be calculated after the interpolation to isentropic space.
End of explanation
"""
# Set up our projection
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
# Coordinates to limit map area
bounds = [(-122., -75., 25., 50.)]
# Choose a level to plot, in this case 296 K
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 245, size='large')
ax = fig.add_subplot(1, 1, 1, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES, linewidth=0.5)
# Plot the surface
clevisent = np.arange(0, 1000, 25)
cs = ax.contour(lon, lat, isentprs[level, :, :], clevisent,
colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree())
ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5),
cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs
ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title('{:.0f} K Isentropic Pressure (hPa), Wind (kt), Relative Humidity (percent)'
.format(isentlevs[level].m), loc='left')
add_timestamp(ax, times[0].dt, y=0.02, high_contrast=True)
fig.tight_layout()
"""
Explanation: Plotting the Isentropic Analysis
End of explanation
"""
# Calculate Montgomery Streamfunction and scale by 10^-2 for plotting
msf = mpcalc.montgomery_streamfunction(isenthgt, isenttmp) / 100.
# Choose a level to plot, in this case 296 K
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 250, size='large')
ax = plt.subplot(111, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=0.5)
# Plot the surface
clevmsf = np.arange(0, 4000, 5)
cs = ax.contour(lon, lat, msf[level, :, :], clevmsf,
colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree())
ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5),
cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs.
ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title('{:.0f} K Montgomery Streamfunction '.format(isentlevs[level].m) +
r'($10^{-2} m^2 s^{-2}$), ' +
'Wind (kt), Relative Humidity (percent)', loc='left')
add_timestamp(ax, times[0].dt, y=0.02, pretext='Valid: ', high_contrast=True)
fig.tight_layout()
plt.show()
"""
Explanation: Montgomery Streamfunction
The Montgomery Streamfunction, $\psi = g z + C_p T$, is often desired because its
gradient is proportional to the geostrophic wind in isentropic space. This can be easily
calculated with mpcalc.montgomery_streamfunction.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/cams/cmip6/models/sandbox-2/landice.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cams', 'sandbox-2', 'landice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Landice
MIP Era: CMIP6
Institute: CAMS
Source ID: SANDBOX-2
Topic: Landice
Sub-Topics: Glaciers, Ice.
Properties: 30 (21 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:43
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Grid
4. Glaciers
5. Ice
6. Ice --> Mass Balance
7. Ice --> Mass Balance --> Basal
8. Ice --> Mass Balance --> Frontal
9. Ice --> Dynamics
1. Key Properties
Land ice key properties
1.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Ice Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify how ice albedo is modelled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Atmospheric Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the atmosphere and ice (e.g. orography, ice mass)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Oceanic Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the ocean and ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which variables are prognostically calculated in the ice model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of land ice code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Grid
Land ice grid
3.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is an adaptive grid being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Base Resolution
Is Required: TRUE Type: FLOAT Cardinality: 1.1
The base resolution (in metres), before any adaption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Resolution Limit
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If an adaptive grid is being used, what is the limit of the resolution (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.5. Projection
Is Required: TRUE Type: STRING Cardinality: 1.1
The projection of the land ice grid (e.g. albers_equal_area)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Glaciers
Land ice glaciers
4.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of glaciers in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of glaciers, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 4.3. Dynamic Areal Extent
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does the model include a dynamic glacial extent?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Ice
Ice sheet and ice shelf
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the ice sheet and ice shelf in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.2. Grounding Line Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.3. Ice Sheet
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice sheets simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Ice Shelf
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice shelves simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Ice --> Mass Balance
Description of the surface mass balance treatment
6.1. Surface Mass Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how and where the surface mass balance (SMB) is calculated. Include the temporal coupling frequency from the atmosphere, whether or not a separate SMB model is used, and if so details of this model, such as its resolution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Ice --> Mass Balance --> Basal
Description of basal melting
7.1. Bedrock
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over bedrock
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Ocean
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Ice --> Mass Balance --> Frontal
Description of calving/melting from the ice shelf front
8.1. Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of calving from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Melting
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of melting from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Ice --> Dynamics
**
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of ice sheet and ice shelf dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Approximation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Approximation type used in modelling ice dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.3. Adaptive Timestep
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there an adaptive time scheme for the ice scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.
End of explanation
"""
|
ComputationalModeling/spring-2017-danielak | past-semesters/spring_2016/homework_assignments/Homework_5.ipynb | agpl-3.0 | %matplotlib inline
import matplotlib.pyplot as plt
from string import punctuation
import urllib.request
files=['negative.txt','positive.txt']
path='http://www.unc.edu/~ncaren/haphazard/'
for file_name in files:
urllib.request.urlretrieve(path+file_name,file_name)
pos_sent = open("positive.txt").read()
positive_words=pos_sent.split('\n')
neg_sent = open("negative.txt").read()
negative_words=neg_sent.split('\n')
"""
Explanation: Homework #5 - Using Bag of Words on the 2016 Presidential Race
This homework assignment expands upon our in-class work analyzing twitter data, focusing on the 2016 presidential elections. Please make sure to get started early, and come by the instructors' office hours if you have any questions. Office hours and locations can be found in the course syllabus. IMPORTANT: While it's fine if you talk to other people in class about this homework - and in fact we encourage it! - you are responsible for creating the solutions for this homework on your own, and each student must submit their own homework assignment.
Name
// Put your name here
Off to the races
Let's see how positive the 2016 Presidential candidate race is so far. I have downloaded Twitter feed data using the tweepy module. You can download the Tweepy.ipynb example from the class D2L website to see how this was done, but it's not necessary to do so if you don't want to. We are going to make a bar graph of each of the candidate's tweets and see how positive their campaign is so far.
The first thing to do is review how we downloaded the Twitter files using the code from the in-class assignment.
End of explanation
"""
twitter_names = [ 'BarackObama', 'realDonaldTrump','HillaryClinton', 'BernieSanders', 'tedcruz']
"""
Explanation: For this homework we are going to use new data downloaded in the last week. The file names are:
<<twitter_names>>_tweets.txt
Where Twitter_names is the Twitter name of the individual politicians. The following Twitter names are available for download:
End of explanation
"""
# Write your code to download the tweet files from each of the politicians.
"""
Explanation: So, for example, the file name for Barack Obama is BarackObama_tweets.txt. The first step is to rewrite the above loop to download all five files and save them to your local directory.
NOTE: these files are no longer on Dr. Neal Caren's Website. We have posted them locally at MSU, at http://www.msu.edu/~colbrydi. As a result, the full URL and file name for Barack Obama is http://www.msu.edu/~colbrydi/BarackObama_tweets.txt
Step 1: Download Data Download the tweet data
End of explanation
"""
def tweetcount(tweet, positive_words, negative_words):
    """Return the "positiveness" ratio of `tweet`.

    Intended contract: (count of positive words - count of negative words)
    divided by the total number of words, so the result lies in [-1.0, 1.0].

    This is a deliberate stub for the homework: 0.5 is returned as a
    placeholder (a recognizable dummy value) until the real implementation
    is filled in below.
    """
    #Put your code here and modify the return statement
    return 0.5
"""
Explanation: Step 2: Create a tweetcount function
Let's see if we can make a function (let's call it tweetcount). The function should take a tweet string, positive_words, and negative_words as inputs and return a "positiveness" ratio - i.e., count all the positive words and subtract all of the negative words and divide by the total words in the tweet (HINT: See the pre-class and in-class assignments!). This value should be a number that ranges from -1.0 to 1.0.
I put in a "stub" function to help get you started. Just fill in your own code.
A code "stub" is a common way to outline a program without getting into all of the details. You can define what your functions need as inputs and ouputs without needing to write the function code. It is common practice to output an "invalid" value (0.5 in this case). You can then use your "stub" to test other code that will call your function.
End of explanation
"""
tweetcount('We have some delightful new food in the cafeteria. Awesome!!!', positive_words, negative_words)
"""
Explanation: Let's test this on a simple tweet to make sure everything is working. (Note: this is sometimes called "unit testing", and is considered to be very good programming practice.) What answer do you expect?
Since there are two positive words (delightful and Awesome) and 10 words in the tweet, we expect to get a value of 0.2. If you do not get 0.2 then something is wrong in your code. Go back and fix it until you do get 0.2.
End of explanation
"""
tweetcount('We have some delightful new food in the cafeteria. Awesome!!!', positive_words, negative_words)
"""
Explanation: Step 3: Bug Fixing
During the class assignment someone noticed an interesting problem when working with real tweet data. Consider the following test. Can you identify the simple difference between this test and the previous test?
End of explanation
"""
tweet = 'We have some delightful new food in the cafeteria. Awesome!!!';
tweet_processed=tweet.lower()
for p in punctuation:
tweet_processed=tweet_processed.replace(p,'')
words=tweet_processed.split(' ')
print(len(words))
tweet = 'We have some delightful new food in the cafeteria. Awesome!!!';
tweet_processed=tweet.lower()
for p in punctuation:
tweet_processed=tweet_processed.replace(p,'')
words=tweet_processed.split(' ')
print(len(words))
"""
Explanation: I assume you got a different answer (maybe 0.18181818181812)? Take a moment and see if you can figure out why.
Hopefully you noticed there is an extra space between the words "new" and "food." Why would this produce a different answer?
Let's break down the problem some more. Using code from the pre-class assignment, let's remove the punctuation and split the words for both tweets:
End of explanation
"""
tweet = 'We have some delightful new food in the cafeteria. Awesome!!!';
tweet_processed=tweet.lower()
for p in punctuation:
tweet_processed=tweet_processed.replace(p,'')
words=tweet_processed.split(' ')
print(words)
tweet = 'We have some delightful new food in the cafeteria. Awesome!!!';
tweet_processed=tweet.lower()
for p in punctuation:
tweet_processed=tweet_processed.replace(p,'')
words=tweet_processed.split(' ')
print(words)
"""
Explanation: That is interesting. The original tweet had 10 words (what we would expect) and the tweet with the doublespace has one extra word? Where did that come from? Well, let's look by printing the entire list instead of just the length (We can do this because the list is short).
End of explanation
"""
# Sanity check: splitting the word files on '\n' leaves an empty string for
# any trailing newline, so both lexicons may contain '' (which would match
# the spurious '' tokens produced by double spaces in tweets).
# Fix: corrected the "emtpy" typo in both diagnostic messages.
if '' in positive_words:
    print('empty string in positive words')
if '' in negative_words:
    print('empty string in negative words')
"""
Explanation: See the empty string represented by the two quotations? It seems that the split() function is adding a "word" which is completely empty between the double spaces. If you think about it, it makes sense, but this can cause trouble. The same problem is occuring in both the positive_words and negative_words lists. You can check this with the following command.
End of explanation
"""
words.remove('')
print(words)
"""
Explanation: This means that anytime there is a doublespace in a tweet that will be counted as both a positive and negative word. This may not be a problem for the "total" emotion (since the net is zero) - however, the extra words impact the total words in the tweet. The best way to avoid this problem is to just remove the doublespaces. For example:
End of explanation
"""
#Put your new function code here.
def tweetcount(tweet, positive_words, negative_words):
    """Return (positives - negatives) / word count for `tweet`.

    This revised version is expected to also discard the empty strings that
    str.split(' ') produces around doubled spaces (see the bug-fixing
    discussion above). Stub: replace the placeholder 0.5 with the real
    computation.
    """
    #Put your code here
    return 0.5
"""
Explanation: Fix your above function to take into account the empty strings. Make sure you consider all cases: for example, what happens if there is no doublespace? What happens if there is more than one?
End of explanation
"""
tweetcount('We have some delightful new food in the cafeteria. Awesome!!!', positive_words, negative_words)
tweetcount('We have some delightful new food in the cafeteria. Awesome!!!', positive_words, negative_words)
"""
Explanation: Test your code here and make sure it returns 0.2 for both tests:
End of explanation
"""
def average_tweet(tweets_list, positive_words, negative_words):
    """Return the mean tweetcount score over all tweets in `tweets_list`.

    NOTE(review): this raises ZeroDivisionError if `tweets_list` is empty —
    confirm callers always pass at least one tweet.
    """
    total = 0
    ## put your code here. Note that this is not a trick question.
    ## You may be able to do this in just a couple lines of code.
    return total / len(tweets_list)
"""
Explanation: Step 4: Total average Tweet
Now let's make a second function (let's call it average_tweet). The function should take tweets_list, positive_words, and negative_words as inputs, loop over all of the tweets in the tweets_list, calulate the tweetcount for each tweet, and store the value. The last step in this function will be to average all of the tweetcounts and return the average.
End of explanation
"""
tweets = open('BarackObama_tweets.txt').read()
tweets = tweets.split('\n')
average_tweet(tweets, positive_words, negative_words)
"""
Explanation: Assuming we wrote this function correctly we can use it to test the tweets in an entire file. For example, the following should work:
End of explanation
"""
average_tweets = []
# Put your loop here. Again, if we call the functions, this code should be fairly short.
"""
Explanation: Step 5: loop over twitter_names
Now create a loop. For each politician, load their tweets file, calculate the average_tweet and appends the average_tweet to a list (one average for each politician).
End of explanation
"""
plt.bar(range(len(average_tweets)), average_tweets);
"""
Explanation: Step 6: Generate a bar graph of candidates Finally, use the plt.bar function to generate a bargraph of candidates. See if you can label the x axis, the y axis, and each individal bar with the candidate's name.
End of explanation
"""
#Put your code here
fig,ax = plt.subplots(1, 1, figsize=(20, 10))
#Center Lables
barlist = plt.bar(range(len(average_tweets)), average_tweets, align='center');
plt.xticks(range(len(average_tweets)), twitter_names, fontsize=20);
plt.yticks(fontsize=20)
plt.xlabel('Candidate', fontsize=24)
plt.ylabel('Emotion', fontsize=24)
barlist[1].set_color('r')
barlist[4].set_color('r')
"""
Explanation: Step 7: Make your graph readable
Okay, now let's practice making the graph easier to read by using Google to find examples and adjust your code. For example, please search for the Python code to make the following adjustments to a figure:
Add labels for the x and y axes (i.e. "Candidates" and "Tweet Emotion Measure" would work)
Set x and y axis font size to 24 point
Rename each bar to the candidate's Twitter name and set the font size to 20
Set the yaxis font size to 20
Make the figure approximately the same height but span more of the notebook from left to right
Center the labels on each of the bars.
Set the x and y axis ticks to a font size of 20
Set the Republican politicians (Trump and Cruz) to red and the Democratic politicians to blue (Obama, Clinton, and Sanders).
Question 1: Type in some example search keywords that you successfully used to help find the solutions. I did the first two to help you get started.
Modify the following list
How do I add x and y labels to a matplotlib figure
How do I change the xlabel font size in matplotlib
Question 2: Using what you learned using Google, try to generate a new (more readable) bar plot following the guidelines outlined above. Your figure should look something like this (note the values on this bar chart were picked for illustration, not correctness):
<img src="http://www.msu.edu/~colbrydi/Formatted_BoW_Image.png">
End of explanation
"""
# Put your code here. Add additional cells as necessary!
"""
Explanation: Question 3: In your own words, describe what these results say about the politicians.
// Write your answer here
Question 4: Only one snapshot of data was provided for this homework. How does this snapshot limit the questions you can ask of the data? What type of data would you like to gather to make stronger claims?
// Write your answer here
Question 5: What other scientific questions can you ask using this type of data and model? Write new code to generate a different plot that answers one of the scientific questions that you have devised. Feel free to be expressive when you design your own metrics (for example, you could use the only_positive, only_negative, both and neither counts from the in-class assignment, or you can go completely beyond this). The goal here is to come up with something interesting that can be expressed with this data. More creative questions and answers will be rewarded appropriately!
End of explanation
"""
|
GoogleCloudPlatform/openmrs-fhir-analytics | dwh/test_spark.ipynb | apache-2.0 | print('Hellooo! We use PySpark!')
"""
Explanation: FHIR Analytics with Spark
This notebook serves as a cleaned-up scratchbook for developing queries for processing
FHIR resources extracted from OpenMRS. This is part of the
OpenMRS Analytics Engine.
The notebook is based on Apache Spark and the
output of ETL batch/streamin pipelines in the
openmrs-fhir-analytics
repository.
Each section, shows examples of specific tasks for environment setup, loading, or processing data
in PySpark.
End of explanation
"""
from typing import List
from datetime import datetime
import time
from pyspark import SparkConf
from pyspark.sql import SparkSession, DataFrame
import pyspark.sql.functions as F
import pyspark.sql.types as T
import pandas
BASE_DIR='./test_files/'
BASE_PATIENT_URL='http://localhost:8099/openmrs/ws/fhir2/R4/Patient/'
conf = (SparkConf()
.setMaster('local[20]')
.setAppName('IndicatorsApp')
# See example cells below where this setting is mentioned and needed.
.set('spark.driver.memory', '10g')
.set('spark.executor.memory', '2g')
# Even if we don't explicitly `cache()`, it should happen automatically after shuffles.
# So we need to be careful if we want to change the following (the defaults is 0.6).
#
# NOTE: It seems in some cases automatic caching after shuffle is not happening,
# see examples/notes below!
#
#.set('spark.storage.memoryFraction', 0.2)
.set('spark.authenticate', 'true') # See: https://spark.apache.org/docs/latest/security.html
)
# Starting up Spark
spark = SparkSession.builder.config(conf=conf).getOrCreate()
"""
Explanation: Setting up Spark
We set up a local cluster with 20 nodes. This should be adjusted depending on the
machine on which the Jupyter server is running. Obviously the queries developed here
can be run on a truly distributed Spark cluster as well.
End of explanation
"""
start = time.time()
all_obs = spark.read.parquet(BASE_DIR + 'Observation')
# We can cache all_obs to make future uses faster.
# all_obs.cache()
end = time.time()
print('Elapsed time: ', end - start)
start = time.time()
print('Number of observations= ', all_obs.count())
end = time.time()
print('Elapsed time: ', end - start)
all_patients = spark.read.parquet(BASE_DIR + '/Patient')
print('Number of patients= ', all_patients.count())
all_encounters = spark.read.parquet(BASE_DIR + '/Encounter')
print('Number of encounters= ', all_encounters.count())
start_date='2020-12-09'
end_date='2020-12-10'
obs_filtered = all_obs.select(
'subject.patientId', 'effective.dateTime', 'code', 'value').filter(
all_obs.effective.dateTime > start_date).filter(
all_obs.effective.dateTime < end_date)
print('Number of filtered observations= ', obs_filtered.count())
obs_filtered.head()
# Note the predicate push-down to reading Parquet files.
obs_filtered.explain()
"""
Explanation: Loading Parquet files, sample aggregate queries, and Pandas conversion.
The Patient, Encounter, Observation directories in ./test_files/ contain Parquet files generated by
the batch pipeline from the test OpenMRS docker image in this repo. Many of these experiments
were also repeated on a large dataset by copying Observation resources 64 times, to verify
that the queries are still performant for millions of Observations.
End of explanation
"""
obs_exploded_codes = obs_filtered.select(
obs_filtered.patientId, obs_filtered.dateTime, obs_filtered.value,
F.explode(obs_filtered.code.coding).alias('coding'))
obs_exploded_codes.head(2)
grouped_obs = obs_exploded_codes.groupBy(['coding.code']).agg({'*':'count'})
print('Number of codes= ', grouped_obs.count())
grouped_obs.head(5)
# Repeating the same cell with `all_obs` to check performance difference.
all_obs_codes = all_obs.select(
all_obs.subject.patientId, all_obs.effective.dateTime, all_obs.value,
F.explode(all_obs.code.coding).alias('coding'))
grouped_all_obs = all_obs_codes.groupBy([
all_obs_codes.coding.code.alias('coding_code'),
all_obs_codes.coding.display.alias('coding_display')]).agg(
F.count('*').alias('num_obs'))
print('Number of codes= ', grouped_all_obs.count())
grouped_all_obs.head(5)
grouped_all_obs.show()
"""
Explanation: explode and groupBy
Too flatten a collumn C with array elements, we can use explode(C) where each
row is repeated by the number of elements in its C column. Each new row will have
one of those elements in the new column.
groupBy is a similar concept to SQL GROUP BY.
End of explanation
"""
grouped_all_obs_P = grouped_all_obs.toPandas()
grouped_all_obs_P
# Verifying that all observations have exactly one code.
grouped_all_obs_P['num_obs'].sum()
"""
Explanation: Conversion to Pandas DataFrame
Whenever a Spark DataFrame is small enough, we can simply convert it to a Pandas DataFrame
to make it easier to work with.
End of explanation
"""
all_obs.head()
coded_obs = all_obs.select(
all_obs.subject.patientId.alias('patientId'),
all_obs.effective.dateTime.alias('dateTime'),
all_obs.value,
F.explode(all_obs.code.coding).alias('coding'))
coded_obs.head()
all_patients.head()
patient_name = all_patients.select(
all_patients.id,
all_patients.name[0].given[0].alias('given'),
all_patients.name[0].family.alias('family')).withColumn(
'core_id', F.regexp_replace('id', BASE_PATIENT_URL, ''))
patient_name.head()
joined_obs = patient_name.join(
coded_obs, patient_name.core_id == coded_obs.patientId)
print('Number of joined rows= ', joined_obs.count())
joined_obs.head()
print('all_obs: {} coded_obs: {}'.format(all_obs.count(), coded_obs.count()))
agg_obs = joined_obs.groupBy(
[joined_obs.patientId, joined_obs.given, joined_obs.family, joined_obs.coding.code]).agg(
{'*':'count', 'value.quantity.value':'max'}).withColumnRenamed('count(1)', 'num_obs')
agg_obs.head(2)
# Doing the same thing with a better API.
agg_obs = joined_obs.groupBy([
joined_obs.patientId,
joined_obs.given,
joined_obs.family,
joined_obs.coding.code.alias('coding_code')
]).agg(
F.count('*').alias('num_obs'),
F.max(joined_obs.value.quantity.value).alias('max_value'),
F.min(joined_obs.dateTime).alias('min_date'),
F.max(joined_obs.dateTime).alias('max_date')
)
print('Number of aggregated rows= ', agg_obs.count())
agg_obs.head(2)
"""
Explanation: Restructure Observations and join with Patients
We can join Observation and Patient resources to get demographic information of each patient.
Note there is
an open issue
to fix the exported FHIR IDs in the ETL pipelines and drop the full server URL.
End of explanation
"""
def flatten_obs(obs: DataFrame) -> DataFrame:
    """Flatten Observation resources to one row per (patient, coding) pair.

    Each element of the code.coding array becomes its own row via explode();
    the nested subject/effective fields are lifted to flat columns.
    """
    patient_col = obs.subject.patientId.alias('patientId')
    date_col = obs.effective.dateTime.alias('dateTime')
    coding_col = F.explode(obs.code.coding).alias('coding')
    return obs.select(patient_col, date_col, obs.value, coding_col)
def aggregate_code_per_patient(
    flat_obs: DataFrame, code: str, start_date: str=None, end_date: str=None) -> DataFrame:
    """Aggregate observations with a single `code`, one row per patient.

    Args:
      flat_obs: output of flatten_obs(), i.e. one row per (patient, coding).
      code: required coding.code value to keep; must be a non-None string.
      start_date: optional exclusive lower bound on dateTime (ISO string).
      end_date: optional exclusive upper bound on dateTime (ISO string).

    Returns:
      DataFrame with columns patientId, coding_code, num_obs,
      min_value, max_value, min_date, max_date.

    Raises:
      ValueError: if `code` is None.
    """
    if code is None:
        raise ValueError('`code` is expected to be a valid code string.')
    date_obs = flat_obs
    if start_date:
        date_obs = date_obs.filter(date_obs.dateTime > start_date)
    if end_date:
        date_obs = date_obs.filter(date_obs.dateTime < end_date)
    code_obs = date_obs.filter(date_obs.coding.code == code)
    # Consistency fix: reference columns through `code_obs` (the frame being
    # aggregated) rather than the original `flat_obs`, so the aggregation is
    # unambiguously tied to the filtered lineage.
    return code_obs.groupBy([
        code_obs.patientId,
        code_obs.coding.code.alias('coding_code')
    ]).agg(
        F.count('*').alias('num_obs'),
        F.min(code_obs.value.quantity.value).alias('min_value'),
        F.max(code_obs.value.quantity.value).alias('max_value'),
        F.min(code_obs.dateTime).alias('min_date'),
        F.max(code_obs.dateTime).alias('max_date')
    )
flat_obs = flatten_obs(all_obs)
flat_obs.count()
# Sample aggregation for "Height".
aggregated_code_obs = aggregate_code_per_patient(flat_obs, '5090AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA')
aggregated_code_obs.cache()
aggregated_code_obs.count()
# Without `cache()` in previous cell, the following `count()` takes another
# few seconds. With `cache` it is sub-second.
aggregated_code_obs.count()
aggregated_code_obs.head(2)
aggregated_code_obs_P = aggregated_code_obs.toPandas()
aggregated_code_obs_P.head()
# This is just to showcase conversion to Pandas and graph functionality; the
# data has no realistic meaning as they are randomly generated (height values).
aggregated_code_obs_P.plot(kind='scatter', x='min_value', y='max_value')
"""
Explanation: Making some functions out of these
Once we are done with step-by-step curing of a query, we can turn the whole process into
functions to be used in Python scripts ran through spark-submit.
End of explanation
"""
flat_obs.head()
test_pivot = flat_obs.groupBy([
flat_obs.patientId
]).pivot('coding.code').agg(
F.min(flat_obs.value.quantity.value),
F.max(flat_obs.value.quantity.value)
)
test_pivot.head()
def flatten_obs(obs: DataFrame) -> DataFrame:
    """Project raw Observations to patientId, dateTime, value and an
    exploded `coding` column (one row per code.coding element)."""
    return obs.select(
        obs['subject'].patientId.alias('patientId'),
        obs['effective'].dateTime.alias('dateTime'),
        obs['value'],
        F.explode(obs['code'].coding).alias('coding'))
def aggregate_all_codes_per_patient(
    obs: DataFrame,
    codes: List[str]=None,
    start_date: str=None,
    end_date: str=None) -> DataFrame:
    """Pivot per-patient observation aggregates by observation code.

    Args:
      obs: raw Observation DataFrame (as read from Parquet).
      codes: optional list of coding.code values to keep; None keeps all.
      start_date: optional exclusive lower bound on dateTime (ISO string).
      end_date: optional exclusive upper bound on dateTime (ISO string).

    Returns:
      One row per patient; for each code, pivoted columns
      <code>_num_obs, _min_value, _max_value, _min_date, _max_date.
    """
    flat_obs = flatten_obs(obs)
    date_obs = flat_obs
    if start_date:
        date_obs = date_obs.filter(date_obs.dateTime > start_date)
    if end_date:
        date_obs = date_obs.filter(date_obs.dateTime < end_date)
    code_obs = date_obs
    if codes:
        code_obs = date_obs.filter(date_obs.coding.code.isin(codes))
    # BUG FIX: the original returned `date_obs.groupBy(...)`, which silently
    # ignored the `codes` filter computed above; aggregate the filtered
    # frame (`code_obs`) instead.
    return code_obs.groupBy([
        code_obs.patientId,
    ]).pivot('coding.code', values=codes).agg(
        F.count('*').alias('num_obs'),
        F.min(code_obs.value.quantity.value).alias('min_value'),
        F.max(code_obs.value.quantity.value).alias('max_value'),
        F.min(code_obs.dateTime).alias('min_date'),
        F.max(code_obs.dateTime).alias('max_date')
    )
agg_obs = aggregate_all_codes_per_patient(all_obs, codes=[
'5089AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
'5090AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'])
# This can also cause OOM errors if we use the default 1GB of spark.driver.memory.
agg_obs.head()
# Plotting maximum height as a function of minimum weight.
# Note random data!
agg_obs_P = agg_obs.toPandas()
agg_obs_P.plot(
kind='scatter',
x='5089AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA_min_value', xlabel='mimum weight',
y='5090AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA_max_value', ylabel='maximum height',
s=2, title='Maximum Height vs Minimum Weight')
"""
Explanation: Extracting all observations for each patient
A handy view of the data, is to group all observations for each patient in a row.
For each observation code, we calculate some aggregates. To have all observations
gathered in the same patient row, we can use pivot().
End of explanation
"""
patients = all_patients
base_patient_url = BASE_PATIENT_URL
patients.head()
flat_patients = patients.select(
patients.id, patients.birthDate, patients.gender).withColumn(
'actual_id', F.regexp_replace('id', base_patient_url, ''))
flat_patients.head()
patient_agg_obs = flat_patients.join(agg_obs, flat_patients.actual_id == agg_obs.patientId)
patient_agg_obs.cache()
patient_agg_obs.head()
patient_agg_obs.count()
def find_age_band(birth_date: str) -> str:
    """Map an ISO birth date ('YYYY-MM-DD') to a PEPFAR-style age band.

    Age in whole years is approximated as elapsed days / 365.25, truncated.
    Bands: 0-1, 1-4, 5-9, 10-14, 15-19, 20-24, 25-49, 50+.
    """
    born = datetime.strptime(birth_date, '%Y-%m-%d')
    years = int((datetime.today() - born).days / 365.25)
    # Each entry is (exclusive upper bound, band label), in ascending order.
    bands = (
        (1, '0-1'),
        (5, '1-4'),
        (10, '5-9'),
        (15, '10-14'),
        (20, '15-19'),
        (25, '20-24'),
        (50, '25-49'),
    )
    for upper_bound, label in bands:
        if years < upper_bound:
            return label
    return '50+'
print(find_age_band('1981-01-07'))
print(find_age_band('1996-01-07'))
print(find_age_band('1996-05-07'))
# A single patient contributes to several disaggregation buckets at once
# (their own age band x gender, plus the ALL-AGES / ALL-GENDERS rollups).
# This function builds the full list of bucket labels for one patient.
def agg_buckets(birth_date: str, gender: str) -> List[str]:
    """Return every age/gender bucket label this patient belongs to."""
    band = find_age_band(birth_date)
    return [
        f'{band}_{gender}',
        f'ALL-AGES_{gender}',
        f'{band}_ALL-GENDERS',
        'ALL-AGES_ALL-GENDERS',
    ]
print(agg_buckets('1981-01-07', 'female'))
#find_age_band_udf = F.UserDefinedFunction(lambda x: find_age_band(x), T.StringType())
agg_buckets_udf = F.UserDefinedFunction(lambda a, g: agg_buckets(a, g), T.ArrayType(T.StringType()))
num_patients = patient_agg_obs.count()
VL_code = '5090AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
VL_df = patient_agg_obs.withColumn(
'sup_VL', patient_agg_obs[VL_code + '_max_value'] < 150).withColumn(
'agg_buckets', agg_buckets_udf(patient_agg_obs['birthDate'], patient_agg_obs['gender'])
)
VL_df.head()
num_patients = VL_df.count()
num_patients
VL_agg_P = VL_df.select(
VL_df.sup_VL,
F.explode(VL_df.agg_buckets).alias('agg_bucket')).groupBy(
'sup_VL', 'agg_bucket').agg(
F.count('*').alias('count')).toPandas().sort_values(
['agg_bucket', 'sup_VL'])
VL_agg_P['ratio'] = VL_agg_P['count']/num_patients
VL_agg_P
VL_agg_P[VL_agg_P['agg_bucket'] == 'ALL-AGES_ALL-GENDERS']
# It seems better not to mix the RDD API with DataFrame but an alternative
# solution for counting one row in multiple buckets was to use rdd.flatMap.
test = VL_df.rdd.flatMap(lambda x: [x['birthDate'], x[2]])
test.collect()[0:10]
"""
Explanation: Joining with patient demographic data
For calculating PEPFAR indicators we need to do disaggregations based on gender, age, etc.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/data-engineering/demos/composer_gcf_trigger/composertriggered.ipynb | apache-2.0 | import os
PROJECT = 'your-project-id' # REPLACE WITH YOUR PROJECT ID
REGION = 'us-central1' # REPLACE WITH YOUR REGION e.g. us-central1
# do not change these
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
"""
Explanation: Triggering a Cloud Composer Pipeline with a Google Cloud Function
In this advanced lab you will learn how to create and run an Apache Airflow workflow in Cloud Composer that completes the following tasks:
- Watches for new CSV data to be uploaded to a Cloud Storage bucket
- A Cloud Function call triggers the Cloud Composer Airflow DAG to run when a new file is detected
- The workflow finds the input file that triggered the workflow and executes a Cloud Dataflow job to transform and output the data to BigQuery
- Moves the original input file to a different Cloud Storage bucket for storing processed files
Part One: Create Cloud Composer environment and workflow
First, create a Cloud Composer environment if you don't have one already by doing the following:
1. In the Navigation menu under Big Data, select Composer
2. Select Create
3. Set the following parameters:
- Name: mlcomposer
- Location: us-central1
- Other values at defaults
4. Select Create
The environment creation process is completed when the green checkmark displays to the left of the environment name on the Environments page in the GCP Console.
It can take up to 20 minutes for the environment to complete the setup process. Move on to the next section - Create Cloud Storage buckets and BigQuery dataset.
Set environment variables
End of explanation
"""
%%bash
## create GCS buckets
exists=$(gsutil ls -d | grep -w gs://${PROJECT}_input/)
if [ -n "$exists" ]; then
echo "Skipping the creation of input bucket."
else
echo "Creating input bucket."
gsutil mb -l ${REGION} gs://${PROJECT}_input
echo "Loading sample data for later"
gsutil cp resources/usa_names.csv gs://${PROJECT}_input
fi
exists=$(gsutil ls -d | grep -w gs://${PROJECT}_output/)
if [ -n "$exists" ]; then
echo "Skipping the creation of output bucket."
else
echo "Creating output bucket."
gsutil mb -l ${REGION} gs://${PROJECT}_output
fi
"""
Explanation: Create Cloud Storage buckets
Create two Cloud Storage Multi-Regional buckets in your project.
- project-id_input
- project-id_output
Run the below to automatically create the buckets and load some sample data:
End of explanation
"""
%%writefile simple_load_dag.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple Airflow DAG that is triggered externally by a Cloud Function when a
file lands in a GCS bucket.
Once triggered the DAG performs the following steps:
1. Triggers a Google Cloud Dataflow job with the input file information received
from the Cloud Function trigger.
2. Upon completion of the Dataflow job, the input file is moved to a
gs://<target-bucket>/<success|failure>/YYYY-MM-DD/ location based on the
status of the previous step.
"""
import datetime
import logging
import os
from airflow import configuration
from airflow import models
from airflow.contrib.hooks import gcs_hook
from airflow.contrib.operators import dataflow_operator
from airflow.operators import python_operator
from airflow.utils.trigger_rule import TriggerRule
# We set the start_date of the DAG to the previous date. This will
# make the DAG immediately available for scheduling.
YESTERDAY = datetime.datetime.combine(
datetime.datetime.today() - datetime.timedelta(1),
datetime.datetime.min.time())
# We define some variables that we will use in the DAG tasks.
SUCCESS_TAG = 'success'
FAILURE_TAG = 'failure'
# An Airflow variable called gcp_completion_bucket is required.
# This variable will contain the name of the bucket to move the processed
# file to.
# '_names' must appear in CSV filename to be ingested (adjust as needed)
# we are only looking for files with the exact name usa_names.csv (you can specify wildcards if you like)
INPUT_BUCKET_CSV = 'gs://'+models.Variable.get('gcp_input_location')+'/usa_names.csv'
# TODO: Populate the models.Variable.get() with the actual variable name for your output bucket
COMPLETION_BUCKET = 'gs://'+models.Variable.get('gcp_completion_bucket')
DS_TAG = '{{ ds }}'
DATAFLOW_FILE = os.path.join(
configuration.get('core', 'dags_folder'), 'dataflow', 'process_delimited.py')
# The following additional Airflow variables should be set:
# gcp_project: Google Cloud Platform project id.
# gcp_temp_location: Google Cloud Storage location to use for Dataflow temp location.
DEFAULT_DAG_ARGS = {
'start_date': YESTERDAY,
'retries': 2,
# TODO: Populate the models.Variable.get() with the variable name for your GCP Project
'project_id': models.Variable.get('gcp_project'),
'dataflow_default_options': {
'project': models.Variable.get('gcp_project'),
# TODO: Populate the models.Variable.get() with the variable name for temp location
'temp_location': 'gs://'+models.Variable.get('gcp_temp_location'),
'runner': 'DataflowRunner'
}
}
def move_to_completion_bucket(target_bucket, target_infix, **kwargs):
    """Move the processed input object to a dated folder in the completion bucket.

    target_bucket: name of the GCS completion bucket.
    target_infix: folder prefix ('success' or 'failure') for the moved object.
    kwargs: Airflow context (provide_context=True); only 'ds' is used.
    """
    # Here we establish a connection hook to GoogleCloudStorage.
    # Google Cloud Composer automatically provides a google_cloud_storage_default
    # connection id that is used by this hook.
    conn = gcs_hook.GoogleCloudStorageHook()
    # NOTE: unlike the upstream sample (which extracted the bucket and object
    # name from the dag_run.conf dictionary supplied by the triggering Cloud
    # Function), this version resolves the source from Airflow variables, so
    # it always moves the fixed usa_names.csv file.
    # NOTE(review): source_object is prefixed with the bucket name
    # ('<bucket>/usa_names.csv'); GCS object names normally do not include the
    # bucket — confirm this matches the layout expected by copy()/delete().
    source_bucket = models.Variable.get('gcp_input_location')
    source_object = models.Variable.get('gcp_input_location')+'/usa_names.csv'
    # 'ds' is the DAG-run date stamp; it partitions the completion folder by day.
    completion_ds = kwargs['ds']
    target_object = os.path.join(target_infix, completion_ds, source_object)
    logging.info('Copying %s to %s',
                 os.path.join(source_bucket, source_object),
                 os.path.join(target_bucket, target_object))
    conn.copy(source_bucket, source_object, target_bucket, target_object)
    # Delete the source only after a successful copy (move semantics).
    logging.info('Deleting %s',
                 os.path.join(source_bucket, source_object))
    conn.delete(source_bucket, source_object)
# Setting schedule_interval to None as this DAG is externally triggered by a Cloud Function.
# The following Airflow variables should be set for this DAG to function:
# bq_output_table: BigQuery table that should be used as the target for
#                  Dataflow in <dataset>.<tablename> format.
#                  e.g. lake.usa_names
# input_field_names: Comma separated field names for the delimited input file.
#                    e.g. state,gender,year,name,number,created_date
with models.DAG(dag_id='GcsToBigQueryTriggered',
                description='A DAG triggered by an external Cloud Function',
                schedule_interval=None, default_args=DEFAULT_DAG_ARGS) as dag:
    # Args required for the Dataflow job.
    job_args = {
        'input': INPUT_BUCKET_CSV,
        'output': models.Variable.get('bq_output_table'),
        'fields': models.Variable.get('input_field_names'),
        'load_dt': DS_TAG
    }
    # Main Dataflow task that will process and load the input delimited file.
    dataflow_task = dataflow_operator.DataFlowPythonOperator(
        task_id="process-delimited-and-push",
        py_file=DATAFLOW_FILE,
        options=job_args)
    # Here we create two conditional tasks, one of which will be executed
    # based on whether the dataflow_task was a success or a failure.
    success_move_task = python_operator.PythonOperator(task_id='success-move-to-completion',
                                                       python_callable=move_to_completion_bucket,
                                                       # A success_tag is used to move
                                                       # the input file to a success
                                                       # prefixed folder.
                                                       op_args=[models.Variable.get('gcp_completion_bucket'), SUCCESS_TAG],
                                                       provide_context=True,
                                                       trigger_rule=TriggerRule.ALL_SUCCESS)
    failure_move_task = python_operator.PythonOperator(task_id='failure-move-to-completion',
                                                       python_callable=move_to_completion_bucket,
                                                       # A failure_tag is used to move
                                                       # the input file to a failure
                                                       # prefixed folder.
                                                       op_args=[models.Variable.get('gcp_completion_bucket'), FAILURE_TAG],
                                                       provide_context=True,
                                                       trigger_rule=TriggerRule.ALL_FAILED)
    # The success_move_task and failure_move_task are both downstream from the
    # dataflow_task.
    dataflow_task >> success_move_task
    dataflow_task >> failure_move_task
"""
Explanation: Create BigQuery Destination Dataset and Table
Next, we'll create a data sink to store the ingested data from GCS<br><br>
Create a new Dataset
In the Navigation menu, select BigQuery
Then click on your qwiklabs project ID
Click Create Dataset
Name your dataset ml_pipeline and leave other values at defaults
Click Create Dataset
Create a new empty table
Click on the newly created dataset
Click Create Table
For Destination Table name specify ingest_table
For schema click Edit as Text and paste in the below schema
state: STRING,<br>
gender: STRING,<br>
year: STRING,<br>
name: STRING,<br>
number: STRING,<br>
created_date: STRING,<br>
filename: STRING,<br>
load_dt: DATE<br><br>
Click Create Table
Review of Airflow concepts
While your Cloud Composer environment is building, let’s discuss the sample file you’ll be using in this lab.
<br><br>
Airflow is a platform to programmatically author, schedule and monitor workflows
<br><br>
Use airflow to author workflows as directed acyclic graphs (DAGs) of tasks. The airflow scheduler executes your tasks on an array of workers while following the specified dependencies.
<br><br>
Core concepts
DAG - A Directed Acyclic Graph is a collection of tasks, organised to reflect their relationships and dependencies.
Operator - The description of a single task, it is usually atomic. For example, the BashOperator is used to execute bash command.
Task - A parameterised instance of an Operator; a node in the DAG.
Task Instance - A specific run of a task; characterised as: a DAG, a Task, and a point in time. It has an indicative state: running, success, failed, skipped, …<br><br>
The rest of the Airflow concepts can be found here.
Complete the DAG file
Cloud Composer workflows are comprised of DAGs (Directed Acyclic Graphs). The code shown in simple_load_dag.py is the workflow code, also referred to as the DAG.
<br><br>
Open the file now to see how it is built. Next will be a detailed look at some of the key components of the file.
<br><br>
To orchestrate all the workflow tasks, the DAG imports the following operators:
- DataFlowPythonOperator
- PythonOperator
<br><br>
Action: <span style="color:blue">Complete the # TODOs in the simple_load_dag.py DAG file below</span> file while you wait for your Composer environment to be setup.
End of explanation
"""
## Run this to display which key value pairs to input
# NOTE(review): assumes a PROJECT variable holding the GCP project id was
# defined in an earlier cell — confirm before running.
import pandas as pd
pd.DataFrame([
    ('gcp_project', PROJECT),
    ('gcp_input_location', PROJECT + '_input'),
    ('gcp_temp_location', PROJECT + '_output/tmp'),
    ('gcp_completion_bucket', PROJECT + '_output'),
    ('input_field_names', 'state,gender,year,name,number,created_date'),
    ('bq_output_table', 'ml_pipeline.ingest_table')
], columns = ['Key', 'Value'])
"""
Explanation: Viewing environment information
Now that you have a completed DAG, it's time to copy it to your Cloud Composer environment and finish the setup of your workflow.<br><br>
1. Go back to Composer to check on the status of your environment.
2. Once your environment has been created, click the name of the environment to see its details.
<br><br>
The Environment details page provides information, such as the Airflow web UI URL, Google Kubernetes Engine cluster ID, name of the Cloud Storage bucket connected to the DAGs folder.
<br><br>
Cloud Composer uses Cloud Storage to store Apache Airflow DAGs, also known as workflows. Each environment has an associated Cloud Storage bucket. Cloud Composer schedules only the DAGs in the Cloud Storage bucket.
Setting Airflow variables
Our DAG relies on variables to pass in values like the GCP Project. We can set these in the Admin UI.
Airflow variables are an Airflow-specific concept that is distinct from environment variables. In this step, you'll set the following six Airflow variables used by the DAG we will deploy.
End of explanation
"""
%%bash
# Set one Airflow variable via the Composer-wrapped Airflow CLI.
# Replace ENVIRONMENT_NAME with your Composer environment's name; REGION and
# PROJECT must be exported in the shell environment.
# Fix: the original ended with '${PROJECT}}' — a stray closing brace that
# would append a literal '}' to the variable's value.
gcloud composer environments run ENVIRONMENT_NAME \
    --location ${REGION} variables -- \
    --set gcp_project ${PROJECT}
"""
Explanation: Option 1: Set the variables using the Airflow webserver UI
In your Airflow environment, select Admin > Variables
Populate each key value in the table with the required variables from the above table
Option 2: Set the variables using the Airflow CLI
The next gcloud composer command executes the Airflow CLI sub-command variables. The sub-command passes the arguments to the gcloud command line tool.<br><br>
To set the six variables, run the gcloud composer command once for each row from the above table. Just as an example, to set the variable gcp_project you could do this:
End of explanation
"""
AIRFLOW_BUCKET = 'us-central1-composer-21587538-bucket' # REPLACE WITH AIRFLOW BUCKET NAME
# Export so later %%bash cells can reference it as ${AIRFLOW_BUCKET}.
os.environ['AIRFLOW_BUCKET'] = AIRFLOW_BUCKET
"""
Explanation: Copy your Airflow bucket name
Navigate to your Cloud Composer instance<br/><br/>
Select DAGs Folder<br/><br/>
You will be taken to the Google Cloud Storage bucket that Cloud Composer has created automatically for your Airflow instance<br/><br/>
Copy the bucket name into the variable below (example: us-central1-composer-08f6edeb-bucket)
End of explanation
"""
%%bash
# Deploy the DAG and its Dataflow job script to the Composer DAGs bucket.
gsutil cp simple_load_dag.py gs://${AIRFLOW_BUCKET}/dags # overwrite DAG file if it exists
gsutil cp -r dataflow/process_delimited.py gs://${AIRFLOW_BUCKET}/dags/dataflow/ # copy Dataflow job to be run
"""
Explanation: Copy your Airflow files to your Airflow bucket
End of explanation
"""
import google.auth
import google.auth.transport.requests
import requests
import six.moves.urllib.parse
# Authenticate with Google Cloud.
# See: https://cloud.google.com/docs/authentication/getting-started
credentials, _ = google.auth.default(
    scopes=['https://www.googleapis.com/auth/cloud-platform'])
authed_session = google.auth.transport.requests.AuthorizedSession(
    credentials)
# Placeholders: replace with your project id, Composer region and environment
# name before running this cell.
project_id = 'your-project-id'
location = 'us-central1'
composer_environment = 'composer'
# Look up the Composer environment to discover its Airflow web UI URL.
environment_url = (
    'https://composer.googleapis.com/v1beta1/projects/{}/locations/{}'
    '/environments/{}').format(project_id, location, composer_environment)
composer_response = authed_session.request('GET', environment_url)
environment_data = composer_response.json()
airflow_uri = environment_data['config']['airflowUri']
# The Composer environment response does not include the IAP client ID.
# Make a second, unauthenticated HTTP request to the web server to get the
# redirect URI.
redirect_response = requests.get(airflow_uri, allow_redirects=False)
redirect_location = redirect_response.headers['location']
# Extract the client_id query parameter from the redirect.
parsed = six.moves.urllib.parse.urlparse(redirect_location)
query_string = six.moves.urllib.parse.parse_qs(parsed.query)
print(query_string['client_id'][0])
"""
Explanation: Navigating Using the Airflow UI
To access the Airflow web interface using the GCP Console:
1. Go back to the Composer Environments page.
2. In the Airflow webserver column for the environment, click the new window icon.
3. The Airflow web UI opens in a new browser window.
Trigger DAG run manually
Running your DAG manually ensures that it operates successfully even in the absence of triggered events.
1. Trigger the DAG manually click the play button under Links
Part Two: Trigger DAG run automatically from a file upload to GCS
Now that your manual workflow runs successfully, you will now trigger it based on an external event.
Create a Cloud Function to trigger your workflow
We will be following this reference guide to setup our Cloud Function
1. In the code block below, uncomment the project_id, location, and composer_environment and populate them
2. Run the below code to get your CLIENT_ID (needed later)
End of explanation
"""
# Execute the following in Cloud Shell, it will not work here.
# Grants the Appspot service account the Token Creator role on itself so the
# Cloud Function can mint IAP tokens. Replace both occurrences of
# 'your-project-id' with your real project id.
gcloud iam service-accounts add-iam-policy-binding \
  your-project-id@appspot.gserviceaccount.com \
  --member=serviceAccount:your-project-id@appspot.gserviceaccount.com \
  --role=roles/iam.serviceAccountTokenCreator
"""
Explanation: Grant Service Account Permissions
To authenticate to Cloud IAP, grant the Appspot Service Account (used by Cloud Functions) the Service Account Token Creator role on itself. To do this, execute the following command in Cloud Shell. Be sure to replace 'your-project-id'
End of explanation
"""
'use strict';
const fetch = require('node-fetch');
const FormData = require('form-data');
/**
* Triggered from a message on a Cloud Storage bucket.
*
* IAP authorization based on:
* https://stackoverflow.com/questions/45787676/how-to-authenticate-google-cloud-functions-for-access-to-secure-app-engine-endpo
* and
* https://cloud.google.com/iap/docs/authentication-howto
*
* @param {!Object} data The Cloud Functions event data.
* @returns {Promise}
*/
exports.triggerDag = async data => {
  // Fill in your Composer environment information here.
  // The project that holds your function
  const PROJECT_ID = 'your-project-id';
  // Navigate to your webserver's login page and get this from the URL
  const CLIENT_ID = 'your-iap-client-id';
  // This should be part of your webserver's URL:
  // {tenant-project-id}.appspot.com
  const WEBSERVER_ID = 'your-tenant-project-id';
  // The name of the DAG you wish to trigger
  const DAG_NAME = 'GcsToBigQueryTriggered';
  // Other constants
  const WEBSERVER_URL = `https://${WEBSERVER_ID}.appspot.com/api/experimental/dags/${DAG_NAME}/dag_runs`;
  const USER_AGENT = 'gcf-event-trigger';
  // The GCS event payload is forwarded to Airflow as the dag_run conf.
  const BODY = {conf: JSON.stringify(data)};
  // Make the request
  try {
    const iap = await authorizeIap(CLIENT_ID, PROJECT_ID, USER_AGENT);
    return makeIapPostRequest(
      WEBSERVER_URL,
      BODY,
      iap.idToken,
      USER_AGENT,
      // NOTE(review): makeIapPostRequest declares only four parameters, so
      // this fifth argument is silently ignored.
      iap.jwt
    );
  } catch (err) {
    throw new Error(err);
  }
};
/**
* @param {string} clientId The client id associated with the Composer webserver application.
* @param {string} projectId The id for the project containing the Cloud Function.
* @param {string} userAgent The user agent string which will be provided with the webserver request.
*/
const authorizeIap = async (clientId, projectId, userAgent) => {
  const SERVICE_ACCOUNT = `${projectId}@appspot.gserviceaccount.com`;
  const JWT_HEADER = Buffer.from(
    JSON.stringify({alg: 'RS256', typ: 'JWT'})
  ).toString('base64');
  let jwt = '';
  let jwtClaimset = '';
  // Obtain an Oauth2 access token for the appspot service account
  // from the GCE/GCF metadata server.
  const res = await fetch(
    `http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/${SERVICE_ACCOUNT}/token`,
    {
      headers: {'User-Agent': userAgent, 'Metadata-Flavor': 'Google'},
    }
  );
  const tokenResponse = await res.json();
  if (tokenResponse.error) {
    return Promise.reject(tokenResponse.error);
  }
  const accessToken = tokenResponse.access_token;
  // Build a short-lived (60 s) JWT claim set targeting the OAuth token
  // endpoint, with the IAP client id as the target audience.
  const iat = Math.floor(new Date().getTime() / 1000);
  const claims = {
    iss: SERVICE_ACCOUNT,
    aud: 'https://www.googleapis.com/oauth2/v4/token',
    iat: iat,
    exp: iat + 60,
    target_audience: clientId,
  };
  jwtClaimset = Buffer.from(JSON.stringify(claims)).toString('base64');
  const toSign = [JWT_HEADER, jwtClaimset].join('.');
  // Ask the IAM signBlob API to sign the header + claim set with the
  // service account's key.
  const blob = await fetch(
    `https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/${SERVICE_ACCOUNT}:signBlob`,
    {
      method: 'POST',
      body: JSON.stringify({
        bytesToSign: Buffer.from(toSign).toString('base64'),
      }),
      headers: {
        'User-Agent': userAgent,
        Authorization: `Bearer ${accessToken}`,
      },
    }
  );
  const blobJson = await blob.json();
  if (blobJson.error) {
    return Promise.reject(blobJson.error);
  }
  // Assemble the signed JWT from header, claimset and signature.
  const jwtSignature = blobJson.signature;
  jwt = [JWT_HEADER, jwtClaimset, jwtSignature].join('.');
  // Exchange the signed JWT for an OpenID Connect id_token accepted by IAP.
  const form = new FormData();
  form.append('grant_type', 'urn:ietf:params:oauth:grant-type:jwt-bearer');
  form.append('assertion', jwt);
  const token = await fetch('https://www.googleapis.com/oauth2/v4/token', {
    method: 'POST',
    body: form,
  });
  const tokenJson = await token.json();
  if (tokenJson.error) {
    return Promise.reject(tokenJson.error);
  }
  return {
    jwt: jwt,
    idToken: tokenJson.id_token,
  };
};
/**
* @param {string} url The url that the post request targets.
* @param {string} body The body of the post request.
* @param {string} idToken Bearer token used to authorize the iap request.
* @param {string} userAgent The user agent to identify the requester.
*/
const makeIapPostRequest = async (url, body, idToken, userAgent) => {
  // Issue the authenticated POST; IAP validates the bearer id_token.
  const requestOptions = {
    method: 'POST',
    body: JSON.stringify(body),
    headers: {
      'User-Agent': userAgent,
      Authorization: `Bearer ${idToken}`,
    },
  };
  const response = await fetch(url, requestOptions);
  if (response.ok) {
    return;
  }
  // Surface the server's error text to the caller.
  throw new Error(await response.text());
};
"""
Explanation: Create the Cloud Function
Navigate to Compute > Cloud Functions
Select Create function
For name specify 'gcs-dag-trigger-function'
For trigger type select 'Cloud Storage'
For event type select 'Finalize/Create'
For bucket, specify the input bucket you created earlier
Important: be sure to select the input bucket and not the output bucket, to avoid an endless triggering loop.
populate index.js
Complete the four required constants defined below in index.js code and paste it into the Cloud Function editor (the js code will not run in this notebook). The constants are:
- PROJECT_ID
- CLIENT_ID (from earlier)
- WEBSERVER_ID (part of Airflow webserver URL)
- DAG_NAME (GcsToBigQueryTriggered)
End of explanation
"""
{
"name": "nodejs-docs-samples-functions-composer-storage-trigger",
"version": "0.0.1",
"dependencies": {
"form-data": "^2.3.2",
"node-fetch": "^2.2.0"
},
"engines": {
"node": ">=8.0.0"
},
"private": true,
"license": "Apache-2.0",
"author": "Google Inc.",
"repository": {
"type": "git",
"url": "https://github.com/GoogleCloudPlatform/nodejs-docs-samples.git"
},
"devDependencies": {
"@google-cloud/nodejs-repo-tools": "^3.3.0",
"mocha": "^6.0.0",
"proxyquire": "^2.1.0",
"sinon": "^7.2.7"
},
"scripts": {
"test": "mocha test/*.test.js --timeout=20000"
}
}
"""
Explanation: populate package.json
Copy and paste the below into package.json
End of explanation
"""
|
analysiscenter/dataset | examples/tutorials/09_tracking.ipynb | apache-2.0 | %load_ext autoreload
%autoreload 2
import sys
import warnings
warnings.filterwarnings("ignore")
import torch
import numpy as np
from tqdm import tqdm_notebook, tqdm
sys.path.append('../..')
from batchflow import Notifier, Pipeline, Dataset, I, W, V, L, B
from batchflow.monitor import *
# Set GPU
%env CUDA_VISIBLE_DEVICES=0
DEVICE = torch.device('cuda:0')
# Touch the device once so CUDA context initialization happens up front.
torch.ones((1, 1, 1), device=DEVICE)
BAR = 't' # can be changed to 'n' to use Jupyter Notebook progress bar
"""
Explanation: Tracking iterative executions
One of the main primitives in our framework is Pipeline, which runs the same list of procedures over and over. It is essential to have a tool to notify the user on its progress; for situations where plain tqdm bar is just not enough we've developed Notifier and Monitor classes that provide fine-grained updates on the state of iterative flow. This notebooks shows how to:
track utilization for a wide variety of resources (cpu, gpu, memory, etc)
create an iteration-wise notifications for values of pipeline variables and other entities
plot graphs (for example, loss values) on the fly
End of explanation
"""
# Notifier wraps an iterable just like tqdm; the positional arg picks the bar type.
for item in Notifier(BAR)(range(5)):
    print(item)
"""
Explanation: Notifier
In the simplest case we use Notifier as a drop-in replacement for tqdm progress bar, and the behaviour remains the same. Positional argument bar tells which progress bar implementation to use:
t stands for plain text tqdm
n allows to make use of modern tqdm_notebook graphics
a automatically changes from text-based to GUI, depending on the environment it is being used
End of explanation
"""
# Compare per-iteration overhead of Notifier against plain tqdm (text and notebook).
%time for item in Notifier('t')(range(100000)): pass
%time for item in tqdm(range(100000)): pass
%time for item in Notifier('n')(range(100000)): pass
%time for item in tqdm_notebook(range(100000)): pass
"""
Explanation: As some of the loops run hundreds of iterations per second, we should take special care with update speed: as we can see, the overhead is negligible for both text and graphical versions of the Notifier:
End of explanation
"""
# Sample CPU utilization every 0.1 s in a background process while we work.
with monitor_cpu(frequency=0.1) as cpu_monitor:
    for _ in Notifier(BAR)(range(10)):
        _ = np.random.random((1000, 10000))
cpu_monitor.visualize()
"""
Explanation: Monitors
We will return to Notifier and its other arguments in just a second; for now, let's look at other feature: Monitors. As we intent to use our pipelines to work with enormous neural networks on gigantic datasets, we must finely control all of our resources: CPU, RAM, GPU load, etc. Monitors provide us tools to visualize resource utilization over a period of time: mere addition of a special context manager allows us to look into CPU usage in details:
End of explanation
"""
# Track process memory (uss), GPU load and GPU memory simultaneously.
with monitor_resource(['uss', 'gpu', 'gpu_memory'], frequency=0.1) as (uss_monitor, gpu_monitor, gpum_monitor):
    for _ in Notifier(BAR)(range(42)):
        cpu_data = np.random.random((1000, 10000))
        gpu_data = torch.ones((256, 512, 2096), device=DEVICE)
        gpu_op = torch.mvlgamma(torch.erfinv(gpu_data), 1) # intense operation
        # Free cached GPU blocks each iteration so the memory curve is visible.
        torch.cuda.empty_cache()
uss_monitor.visualize()
gpu_monitor.visualize()
gpum_monitor.visualize()
"""
Explanation: Under the hood Monitor creates a separate process, that checks the state of a resource every frequency seconds and can fetch collected data on demand.
There are a number of resources that can be tracked:
- CPU utilization (cpu)
- RAM used by the current process (uss)
- total RAM used on the machine (memory)
- GPU utilization (gpu)
- GPU memory used (gpu_memory)
For convenience we also provide context manager to track multiple entities at once:
End of explanation
"""
# monitors= attaches the same resource monitors directly to the Notifier.
notifier = Notifier(BAR, monitors=['memory', 'cpu'])
for _ in notifier(range(100)):
    _ = np.random.random((1000, 100))
notifier.visualize()
"""
Explanation: This feature is immensely helpful during both research and deploy stages, so we included it in the Notifier itself: it has never been so easy to always keep track of all the resources!
Note that monitors connected to an instance of Notifier provide information on the resource usage not only every frequency seconds, but also at the end of each iteration.
End of explanation
"""
# Toy pipeline: 'loss_history' appends an exponentially decaying value each
# iteration (I() is the iteration index), and 'image' is regenerated as
# random 30x30 noise each iteration.
pipeline = (
    Pipeline()
    .init_variable('loss_history', [])
    .init_variable('image')
    .update(V('loss_history', mode='a'), 100 * 2 ** (-I()))
    .update(V('image'), L(np.random.random)((30, 30)))
) << Dataset(10)
"""
Explanation: Pipeline
As was already mentioned, one of the prime purposes of our notification system is to be used in tandem with Pipeline. We create a rather simple one with following variables:
list loss_history that is updated at each iteration with a new value
2D array image that is randomly generated at each iteration
End of explanation
"""
# notifier= accepts a bar-type string directly.
pipeline.reset('all')
_ = pipeline.run(1, n_iters=10, notifier=BAR)
"""
Explanation: Vanilla pipeline
The easiest way to track progress is to supply notifier argument to the run method. It works with:
an instance of Notifier
string (t, n, a) would be used as positional argument for Notifier creation in the bowels of Pipeline
dictionary with parameters of Notifier initialization
End of explanation
"""
# Track the 'loss_history' pipeline variable alongside the progress bar.
pipeline.reset('all')
_ = pipeline.run(1, n_iters=10, notifier=Notifier(BAR, monitors='loss_history'))
pipeline.notifier.visualize()
"""
Explanation: Track pipeline variables
We can use notifier to track values of pipeline variables at each iteration: conveniently, we use the same monitor argument:
End of explanation
"""
# Mix resource and variable monitors, and log the progress to a file.
pipeline.reset('all')
_ = pipeline.run(1, n_iters=50, notifier=Notifier(BAR, monitors=['cpu', 'loss_history'], file='notifications.txt'))
pipeline.notifier.visualize()
!head notifications.txt -n 13
"""
Explanation: Obviously, we can use the same resource monitors, as before, by passing additional items to monitors. There is also file argument, that allows us to log the progress to an external storage:
End of explanation
"""
# graphs= plots the tracked series live during the run.
pipeline.reset('all')
_ = pipeline.run(1, n_iters=10, notifier=Notifier('n', graphs=['memory', 'loss_history']))
"""
Explanation: Live plots
One of the distinct features of Notifier is its ability of plotting data on the fly: to do so, we must supply graphs argument. Its semantics are exactly the same as those of monitors parameter.
End of explanation
"""
# frequency=10 redraws the (relatively costly) plots only every 10th iteration.
pipeline.reset('all')
_ = pipeline.run(1, n_iters=100, notifier=Notifier('n', graphs=['memory', 'loss_history', 'image'], frequency=10))
"""
Explanation: It can work with images also. As the rendering of plots might take some time, we want to do so once every 10 iterations and achieve so by using frequency parameter:
End of explanation
"""
def custom_plotter(ax=None, container=None, **kwargs):
    """Blank out the central 10x10 patch of the image and draw it with custom labels.

    Mutates ``container['data']`` in place before rendering it on ``ax``.
    """
    image = container['data']
    image[10:20, 10:20] = 0  # zero the center region in place
    ax.imshow(image)
    # Apply the same oversized font to the title and both axis labels.
    for setter, text in ((ax.set_title, container['name']),
                         (ax.set_xlabel, 'axis one'),
                         (ax.set_ylabel, 'axis two')):
        setter(text, fontsize=18)
# Each graph entry can be a dict with 'source', a display 'name' and an
# optional custom 'plot_function'.
pipeline.reset('all')
_ = pipeline.run(1, n_iters=100,
                 notifier=Notifier('n',
                                   graphs=[{'source': 'memory',
                                            'name': 'my custom monitor'},
                                           {'source': 'image',
                                            'name': 'amazing plot',
                                            'plot_function': custom_plotter}],
                                   frequency=10)
                 )
"""
Explanation: Advanced usage of Notifier
Different people prefer different types of notifications: for some modest print is enough, some build complex systems to bring notifications to their mobile devices. Our Notifier provides you fine control on how the data is shown so you can tune it for yourself.
Previously, we passed strings as monitor/graphs parameters to identify, which exactly pipeline variable or resource should be tracked. Under the hood, each of them is parsed to a dictionary with parameters:
source determines the data generator
name used for plot titles and near bar text descriptions
plot_function can be called for custom plotting mechanism
We can leverage those parameters make all the plots to our liking:
End of explanation
"""
|
poldrack/fmri-analysis-vm | analysis/Bayesian/VariationalBayes.ipynb | mit | import numpy,scipy
import time
from numpy.linalg import inv
from scipy.special import digamma,gammaln
from numpy import log,pi,trace
from numpy.linalg import det
import matplotlib.pyplot as plt
from pymc3 import Model,glm,find_MAP,NUTS,sample,Metropolis,HalfCauchy,Normal
%matplotlib inline
"""
Explanation: This notebook provides an example of the use of variational Bayesian estimation and inference. The VB computations here are based on Kay H. Brodersen's MATLAB demo at https://www.tnu.ethz.ch/de/software/tapas.html
I have contrived a simple example where we compute a one-sample t-test versus zero.
End of explanation
"""
# Create classes for prior and posterior
class Prior:
    """Gamma prior hyperparameters for the variational Bayesian GLM.

    a_0 / b_0 : shape / rate of the prior on the coefficient precision (alpha)
    c_0 / d_0 : shape / rate of the prior on the noise precision (lambda)
    """

    def __init__(self, a_0=10, b_0=0.2, c_0=10, d_0=1):
        self.a_0, self.b_0 = a_0, b_0
        self.c_0, self.d_0 = c_0, d_0
class Posterior:
    """Variational posterior factors q(beta) q(alpha) q(lambda) for the GLM.

    The Gaussian factor over the d coefficients starts at zero mean with a
    unit precision matrix; both Gamma factors are seeded at the prior
    hyperparameters, and the free energy starts at -inf so the first VB
    sweep always registers an improvement.
    """

    def __init__(self, d, prior):
        # q(beta): Gaussian with mean mu_n and precision matrix Lambda_n.
        self.mu_n = numpy.zeros((d, 1))
        self.Lambda_n = numpy.eye(d)
        # q(alpha) and q(lambda): Gamma shape/rate pairs.
        self.a_n, self.b_n = prior.a_0, prior.b_0
        self.c_n, self.d_n = prior.c_0, prior.d_0
        self.F = -numpy.inf
        self.prior = prior
        self.trace = []
# Returns the variational posterior q that maximizes the free energy.
def invert_model(y, X, prior, tolerance=10e-8, verbose=False):
    """Fit the Bayesian GLM y = X @ beta + noise by variational Bayes.

    Parameters
    ----------
    y : array, shape (n,) — observations.
    X : array, shape (n, d) — design matrix.
    prior : Prior — Gamma hyperparameters of the two precisions.
    tolerance : float — stop once the free-energy gain per sweep falls below
        this (NOTE: 10e-8 is 1e-7; kept as-is for backward compatibility).
    verbose : bool — print the number of sweeps performed.

    Returns
    -------
    Posterior with the converged variational factors and free energy ``F``.
    """
    n, d = X.shape  # observations x regressors
    # Initialize the variational posterior at the prior and score it.
    q = Posterior(d, prior)
    q.F = free_energy(q, y, X, prior)
    nMaxIter = 30
    kX = X.T.dot(X)  # Gram matrix; constant across sweeps, so compute it once
    for i in range(nMaxIter):
        # (1) Update q(beta): Gaussian with precision <alpha>*I + <lambda>*X'X.
        # (The original added the scalar a_n/b_n to every entry of X'X, which
        # is only equivalent to alpha*I when d == 1.)
        q.Lambda_n = q.a_n/q.b_n * numpy.eye(d) + q.c_n/q.d_n * kX
        q.mu_n = q.c_n/q.d_n * numpy.linalg.inv(q.Lambda_n).dot(X.T.dot(y))
        # (2) Update q(alpha): Gamma posterior over the coefficient precision.
        q.a_n = prior.a_0 + d/2
        q.b_n = prior.b_0 + 1/2 * (q.mu_n.T.dot(q.mu_n) + trace(inv(q.Lambda_n)))
        # (3) Update q(lambda): Gamma posterior over the noise precision.
        q.c_n = prior.c_0 + n/2
        pe = y - X.dot(q.mu_n)  # prediction error
        q.d_n = prior.d_0 + 0.5 * (pe.T.dot(pe) + trace(inv(q.Lambda_n).dot(kX)))
        # Score the new factorization; VB guarantees F is non-decreasing.
        F_old = q.F
        q.F = free_energy(q, y, X, prior)
        # Converged once the free-energy gain drops below tolerance.
        if (q.F - F_old) < tolerance:
            break
    else:
        # for/else: reached only when all sweeps ran without converging.
        # (The original tested `i == nMaxIter`, which range() never yields,
        # so the warning could never fire.)
        print('tvblm: reached max iterations', nMaxIter)
    if verbose:
        print('converged in %d iterations' % i)
    return q
# Computes the free energy of the model given the data.
def free_energy(q, y, X, prior):
    """Return the variational free energy F = <ln p(y,beta,alpha,lambda)>_q + H[q].

    F lower-bounds the log model evidence and is the quantity the VB sweeps
    in ``invert_model`` monotonically increase.

    Two fixes relative to the original MATLAB port (both are no-ops for the
    d == 1 demo in this notebook):
    * the expected-log-likelihood trace term needs a matrix product,
      trace(X'X @ M); the original used elementwise `*`, which under trace
      only sums the diagonal products;
    * <ln alpha> enters the beta-prior term d/2 times (beta is d-dimensional);
      the original multiplied by n/2.
    """
    n, d = X.shape  # observations x regressors
    # Expected log joint <ln p(y,beta,alpha,lambda)>_q
    J = (  # likelihood term
        n/2*(digamma(q.c_n)-log(q.d_n)) - n/2*log(2*pi)
        - 0.5*q.c_n/q.d_n*(y.T.dot(y)) + q.c_n/q.d_n*(q.mu_n.T.dot(X.T.dot(y)))
        - 0.5*q.c_n/q.d_n*trace(X.T.dot(X).dot(q.mu_n.dot(q.mu_n.T) + inv(q.Lambda_n)))
        # prior over beta
        - d/2*log(2*pi) + d/2*(digamma(q.a_n)-log(q.b_n))
        - 0.5*q.a_n/q.b_n * (q.mu_n.T.dot(q.mu_n) + trace(inv(q.Lambda_n)))
        # Gamma prior over alpha
        + prior.a_0*log(prior.b_0) - gammaln(prior.a_0)
        + (prior.a_0-1)*(digamma(q.a_n)-log(q.b_n)) - prior.b_0*q.a_n/q.b_n
        # Gamma prior over lambda
        + prior.c_0*log(prior.d_0) - gammaln(prior.c_0)
        + (prior.c_0-1)*(digamma(q.c_n)-log(q.d_n)) - prior.d_0*q.c_n/q.d_n)
    # Entropy H[q]: Gaussian entropy for q(beta) plus two Gamma entropies.
    H = (d/2*(1+log(2*pi)) + 1/2*log(det(inv(q.Lambda_n)))
         + q.a_n - log(q.b_n) + gammaln(q.a_n) + (1-q.a_n)*digamma(q.a_n)
         + q.c_n - log(q.d_n) + gammaln(q.c_n) + (1-q.c_n)*digamma(q.c_n))
    # Free energy
    return J + H
"""
Explanation: Set up code to estimate using VB
End of explanation
"""
# Monte-Carlo comparison: for each true effect size, run `nruns` simulated
# datasets through both the frequentist t-test and the VB model, recording
# p-values / posterior exceedance probabilities, estimated means and timing.
npts=64
std=1
nruns=1000
# one sample t test
X=numpy.ones((npts,1))
prior=Prior(a_0=10,c_0=10)
means=numpy.arange(0,0.501,0.1)
vb_siglevel=numpy.zeros(len(means))
t_siglevel=numpy.zeros(len(means))
vb_mean=numpy.zeros(len(means))
samp_mean=numpy.zeros(len(means))
t_time=0
vb_time=0
for j,mean in enumerate(means):
    vb_pvals=numpy.zeros(nruns)
    t_pvals=numpy.zeros(nruns)
    vb_means=numpy.zeros(nruns)
    samp_means=numpy.zeros(nruns)
    for i in range(nruns):
        # Simulate npts Gaussian observations with the current true mean.
        y=numpy.random.randn(npts)*std+mean
        samp_means[i]=numpy.mean(y)
        t=time.time()
        q=invert_model(y, X, prior,verbose=False)
        vb_means[i]=q.mu_n
        # q.Lambda_n is the estimated precision, so we turn it into a standard deviation
        vb_pvals[i]=scipy.stats.norm.cdf(0,q.mu_n,1/numpy.sqrt(q.Lambda_n))
        vb_time+=time.time()-t
        t=time.time()
        _,t_pvals[i]=scipy.stats.ttest_1samp(y,0)
        t_time+=time.time()-t
    # Fraction of runs declared "significant" at the 5% level, per method.
    vb_siglevel[j]=numpy.mean(vb_pvals<0.05)
    t_siglevel[j]=numpy.mean(t_pvals<0.05)
    vb_mean[j]=numpy.mean(vb_means)
    samp_mean[j]=numpy.mean(samp_means)
print('Total elapsed time for %d analyses (seconds):'%int(len(means)*nruns))
print('t-test: %0.2f'%t_time)
print('VB: %0.2f'%vb_time)
"""
Explanation: Now create synthetic data with a specified mean and perform a one sample t-test using either the t-test function from scipy.stats or using VB estimation.
End of explanation
"""
# Left panel: rejection rate at p<0.05 (for mean=0 this is the false-positive
# rate; for mean>0 it is power). Right panel: recovered mean effect.
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
plt.plot(means,t_siglevel)
plt.plot(means,vb_siglevel)
plt.xlabel('Mean effect (in SD units)')
plt.ylabel('Proportion of exceedences')
plt.legend(['ttest','VB'],loc=2)
plt.subplot(1,2,2)
plt.plot(means,samp_mean)
plt.plot(means,vb_mean)
plt.xlabel('Mean effect (in SD units)')
plt.ylabel('Estimated mean')
plt.legend(['Sample mean','VB estimate'],loc=2)
# The first entry of means is 0, so siglevel[0] is the empirical alpha level.
print('False positive rate:')
print('t-test: %0.4f'%t_siglevel[0])
print('VB: %0.4f'%vb_siglevel[0])
"""
Explanation: Now plot the error/power for the two approaches. For the t-test, this is the proportion of statistically significant outcomes over the realizations (at p<0.05). For the VB estimate, this is the proportion of tests for which zero falls below the 5%ile of the estimated posterior.
End of explanation
"""
|
rishuatgithub/MLPy | torch/PYTORCH_NOTEBOOKS/01-PyTorch-Basics/03-PyTorch-Basics-Exercises-Solutions.ipynb | apache-2.0 | # CODE HERE
import torch
import numpy as np
"""
Explanation: <img src="../Pierian-Data-Logo.PNG">
<br>
<strong><center>Copyright 2019. Created by Jose Marcial Portilla.</center></strong>
PyTorch Basics Exercises - SOLUTIONS
For these exercises we'll create a tensor and perform several operations on it.
<div class="alert alert-danger" style="margin: 10px"><strong>IMPORTANT NOTE!</strong> Make sure you don't run the cells directly above the example output shown, <br>otherwise you will end up writing over the example output!</div>
1. Perform standard imports
Import torch and NumPy
End of explanation
"""
# CODE HERE
# Seed both RNGs so the "random" results below are reproducible across runs.
np.random.seed(42)
torch.manual_seed(42); # the semicolon suppresses the jupyter output line
"""
Explanation: 2. Set the random seed for NumPy and PyTorch both to "42"
This allows us to share the same "random" results.
End of explanation
"""
# CODE HERE
# DON'T WRITE HERE
# Draw six random integers uniformly from {0, 1, 2, 3, 4}.
arr = np.random.randint(low=0, high=5, size=6)
print(arr)
"""
Explanation: 3. Create a NumPy array called "arr" that contains 6 random integers between 0 (inclusive) and 5 (exclusive)
End of explanation
"""
# CODE HERE
# DON'T WRITE HERE
# Build a tensor that shares memory with the NumPy array `arr`
# (changes to one are reflected in the other).
x = torch.from_numpy(arr)
print(x)
"""
Explanation: 4. Create a tensor "x" from the array above
End of explanation
"""
# CODE HERE
# DON'T WRITE HERE
# Cast to 64-bit integers; torch.int64 is the dtype behind LongTensor.
x = x.type(torch.int64)
# x = x.type(torch.LongTensor)
print(x.type())
"""
Explanation: 5. Change the dtype of x from 'int32' to 'int64'
Note: 'int64' is also called 'LongTensor'
End of explanation
"""
# CODE HERE
# DON'T WRITE HERE
# view() returns a 3x2 tensor over the same underlying storage; the two
# commented alternatives achieve the same shape.
x = x.view(3,2)
# x = x.reshape(3,2)
# x.resize_(3,2)
print(x)
"""
Explanation: 6. Reshape x into a 3x2 tensor
There are several ways to do this.
End of explanation
"""
# CODE HERE
# DON'T WRITE HERE
# Slicing with 1: keeps the column dimension (shape 3x1); plain indexing with
# 1 (the commented alternative) would return a flat 1-D tensor instead.
print(x[:,1:])
# print(x[:,1])
"""
Explanation: 7. Return the right-hand column of tensor x
End of explanation
"""
# CODE HERE
# DON'T WRITE HERE
# Element-wise square; every one of these forms leaves x itself unchanged.
print(x*x)
# print(x**2)
# print(x.mul(x))
# print(x.pow(2))
# print(torch.mul(x,x))
"""
Explanation: 8. Without changing x, return a tensor of square values of x
There are several ways to do this.
End of explanation
"""
# CODE HERE
# DON'T WRITE HERE
# Shape 2x3 so that the 3x2 tensor x can be matrix-multiplied with y.
y = torch.randint(low=0, high=5, size=(2,3))
print(y)
"""
Explanation: 9. Create a tensor "y" with the same number of elements as x, that can be matrix-multiplied with x
Use PyTorch directly (not NumPy) to create a tensor of random integers between 0 (inclusive) and 5 (exclusive).<br>
Think about what shape it should have to permit matrix multiplication.
End of explanation
"""
# CODE HERE
# DON'T WRITE HERE
# Matrix product: (3x2) @ (2x3) -> 3x3.
print(x.mm(y))
"""
Explanation: 10. Find the matrix product of x and y
End of explanation
"""
|
merryjman/astronomy | sample.ipynb | gpl-3.0 | # Import modules that contain functions we need
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
"""
Explanation: Star catalogue analysis
Thanks to UCF Physics undergrad Tyler Townsend for contributing to the development of this notebook.
End of explanation
"""
# Read in data that will be used for the calculations.
# Using pandas read_csv method, we can create a data frame.
# `data` is a curated star catalogue; `datatwo` is the HYG v3 database, which
# provides the `con` (constellation) and `pmra`/`pmdec` columns used below.
data = pd.read_csv("https://raw.githubusercontent.com/merryjman/astronomy/master/stars.csv")
datatwo = pd.read_csv("https://raw.githubusercontent.com/astronexus/HYG-Database/master/hygdata_v3.csv")
# We wish to look at the first 12 rows of our data set
data.head(12)
"""
Explanation: Getting the data
End of explanation
"""
fig = plt.figure(figsize=(15, 4))
# Scatter every star by sky position; tiny markers keep the dense field readable.
plt.scatter(data.ra,data.dec, s=0.01)
# Reverse the x-axis (24 -> 0): by convention right ascension increases to the left.
plt.xlim(24, 0)
plt.title("All the Stars in the Catalogue")
plt.xlabel('right ascension (hours)')
plt.ylabel('declination (degrees)')
Explanation: Star map
End of explanation
"""
# These are the abbreviations for all the constellations
datatwo.sort_values('con').con.unique()
# This shows just one constellation (UMa = Ursa Major).
datatwo_con = datatwo.query('con == "UMa"')
# Define a variable called "name" so I don't have to keep renaming the plot title!
name = "Ursa Major"
# This plots where the brightest 15 stars are in the sky
# (smaller magnitude = brighter, so sort ascending and take the first 15).
datatwo_con = datatwo_con.sort_values('mag').head(15)
plt.scatter(datatwo_con.ra,datatwo_con.dec)
plt.gca().invert_xaxis()
# I graphed first without the axis limits below, to see what it looks like,
# then I added plt.xlim(15,8) and plt.ylim(30,70) to make it look nicer.
plt.xlim(15,8)
plt.ylim(30,70)
plt.title('%s In the Sky'%(name))
plt.xlabel('right ascension (hours)')
plt.ylabel('declination (degrees)')
"""
Explanation: Let's Graph a Constellation!
End of explanation
"""
# What did this constellation look like 50,000 years ago??
# Rewind each star along its proper motion. The pm columns are presumably in
# milliarcseconds/year (TODO confirm against the HYG column docs): /1000/3600
# converts to degrees/year, and the extra /15 converts RA degrees to hours.
plt.scatter(datatwo_con.ra-datatwo_con.pmra/1000/3600/15*50000,datatwo_con.dec-datatwo_con.pmdec/1000/3600*50000)
plt.xlim(15,8)
plt.ylim(30,70)
plt.title('%s Fifty Thousand Years Ago!'%(name))
plt.xlabel('right ascension (hours)')
plt.ylabel('declination (degrees)')
Explanation: Let's Go Back in Time!
End of explanation
"""
# Now, let's try looking at what this same constellation will look like in 50,000 years!
# Same proper-motion extrapolation as the previous cell, with the displacement
# added instead of subtracted.
plt.scatter(datatwo_con.ra+datatwo_con.pmra/1000/3600/15*50000,datatwo_con.dec+datatwo_con.pmdec/1000/3600*50000)
plt.xlim(15,8)
plt.ylim(30,70)
plt.title('%s Fifty Thousand Years From Now!'%(name))
plt.xlabel('right ascension (hours)')
plt.ylabel('declination (degrees)')
"""
Explanation: Let's Go Into the Future!
End of explanation
"""
# Make a Hertzsprung-Russell Diagram!
"""
Explanation: Now you try one of your own!
End of explanation
"""
|
vzg100/Post-Translational-Modification-Prediction | .ipynb_checkpoints/Phosphorylation Sequence Tests -MLP -dbptm+ELM -scalesTrain-VectorAvr.-checkpoint.ipynb | mit | from pred import Predictor
from pred import sequence_vector
from pred import chemical_vector
"""
Explanation: Template for test
End of explanation
"""
# Grid-search every imbalance-correction technique against every feature-scaling
# option for S phosphorylation, benchmarking each trained model.
# Per the notebook text, random_data toggles whether negatives are randomly
# generated (1) or not (0) -- the "x"/"y" labels reproduce the original
# progress output for the two settings.
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
scale = [-1, "standard", "robust", "minmax", "max"]
for i in par:
    for j in scale:
        # One run per negative-sampling strategy; previously this was two
        # copy-pasted stanzas differing only in the label and random_data flag.
        for label, random_flag in (("y", 0), ("x", 1)):
            print(label, i, " ", j)
            model = Predictor()
            model.load_data(file="Data/Training/clean_s_filtered.csv")
            model.process_data(vector_function="sequence", amino_acid="S",
                               imbalance_function=i, random_data=random_flag)
            model.supervised_training("mlp_adam", scale=j)
            model.benchmark("Data/Benchmarks/phos.csv", "S")
            del model
"""
Explanation: Controlling for Random Negative vs. Sans-Random in Imbalanced Techniques using S, T, and Y Phosphorylation.
Included is N Phosphorylation; however, no benchmarks are available yet.
Training data is from phospho.elm and benchmarks are from dbptm.
Note: SMOTEENN seems to perform best
End of explanation
"""
# Same grid search as the S cell, but targeting Y phosphorylation.
# NOTE(review): this cell still loads clean_s_filtered.csv even though it
# processes amino_acid="Y" -- confirm whether a Y-specific training file
# was intended.
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
scale = [-1, "standard", "robust", "minmax", "max"]
for i in par:
    for j in scale:
        # One run per negative-sampling strategy; previously this was two
        # copy-pasted stanzas differing only in the label and random_data flag.
        for label, random_flag in (("y", 0), ("x", 1)):
            print(label, i, " ", j)
            model = Predictor()
            model.load_data(file="Data/Training/clean_s_filtered.csv")
            model.process_data(vector_function="sequence", amino_acid="Y",
                               imbalance_function=i, random_data=random_flag)
            model.supervised_training("mlp_adam", scale=j)
            model.benchmark("Data/Benchmarks/phos.csv", "Y")
            del model
"""
Explanation: Y Phosphorylation
End of explanation
"""
# Same grid search as the S cell, but targeting T phosphorylation.
# NOTE(review): this cell still loads clean_s_filtered.csv even though it
# processes amino_acid="T" -- confirm whether a T-specific training file
# was intended.
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
scale = [-1, "standard", "robust", "minmax", "max"]
for i in par:
    for j in scale:
        # One run per negative-sampling strategy; previously this was two
        # copy-pasted stanzas differing only in the label and random_data flag.
        for label, random_flag in (("y", 0), ("x", 1)):
            print(label, i, " ", j)
            model = Predictor()
            model.load_data(file="Data/Training/clean_s_filtered.csv")
            model.process_data(vector_function="sequence", amino_acid="T",
                               imbalance_function=i, random_data=random_flag)
            model.supervised_training("mlp_adam", scale=j)
            model.benchmark("Data/Benchmarks/phos.csv", "T")
            del model
"""
Explanation: T Phosphorylation
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.