| text_prompt (string, 168 to 30.3k chars) | code_prompt (string, 67 to 124k chars) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data
Step2: 1) Color auto-selection
Step3: Now... you don't have to get stuck with the default palette. Bokeh comes with a pre-built list of palettes.
Step4: 2) Interaction out of the box - the toolbar
Step5: 3) Grouping and aggregation built-in
Step6: Another nice feature is grouping, which, in tandem with aggregation, can provide further insight into the displayed data. For instance, in the chart below we are again showing the average price per clarity, but now grouped by cut type.
|
<ASSISTANT_TASK:>
Python Code:
from bokeh.io import output_notebook, show
output_notebook()
import pandas as pd
diamonds = pd.read_csv('./data/diamonds.csv')
diamonds = diamonds.sample(n=1000)
diamonds.head()
from bokeh.charts import Scatter, Histogram, Bar
p = Scatter(diamonds, color='cut', x='carat', y='price', title='Price of diamonds by carats')
show(p)
from bokeh.palettes import YlGn6
from bokeh.charts import Scatter, Histogram, Bar
p = Scatter(diamonds, color='cut', x='carat', y='price', title='Price of diamonds by carats', palette=YlGn6)
show(p)
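# Quick peek (added for illustration, not in the original notebook): a Bokeh
# palette such as YlGn6 is just a sequence of hex color strings, so any such
# list can be passed as `palette=`.
print(YlGn6)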
p = Bar(diamonds, 'cut', values='price', title="Sum of price per diamond cut", color='cut',
toolbar_location="right", tools='pan,wheel_zoom, undo')
show(p)
p = Bar(diamonds, 'clarity', values='price', title="Average price per clarity", color = 'clarity',
toolbar_location="right", agg='mean')
show(p)
p = Bar(diamonds, 'clarity', values='price', title="Avg price per cut and clarity", color = 'cut',
toolbar_location="right", agg='mean', group='cut', legend="top_right")
show(p)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise sheet 7 (Übungsblatt 7)
Step3: Exercise 2: CFG
Step5: Homework
Step6: Exercise 8: Adverbs and verb-second word order
|
<ASSISTANT_TASK:>
Python Code:
grammar = """
S -> NP VP
NP -> DET[GEN=?x] NOM[GEN=?x]
NOM[GEN=?x] -> ADJ NOM[GEN=?x] | N[GEN=?x]
ADJ -> "schöne" | "kluge" | "dicke"
DET[GEN=mask,KAS=nom] -> "der"
DET[GEN=fem,KAS=dat] -> "der"
DET[GEN=fem,KAS=nom] -> "die"
DET[GEN=fem,KAS=akk] -> "die"
DET[GEN=neut,KAS=nom] -> "das"
DET[GEN=neut,KAS=akk] -> "das"
N[GEN=mask] -> "Mann"
N[GEN=fem] -> "Frau"
N[GEN=neut] -> "Buch"
VP -> V NP NP | V NP | V
V -> "gibt" | "schenkt" | "schläft" | "gefällt" | "kennt"
import nltk
from IPython.display import display
import sys
def test_grammar(grammar, sentences):
cfg = nltk.grammar.FeatureGrammar.fromstring(grammar)
parser = nltk.parse.FeatureEarleyChartParser(cfg)
for i, sent in enumerate(sentences, 1):
print("Satz {}: {}".format(i, sent))
sys.stdout.flush()
results = parser.parse(sent.split())
analyzed = False
for tree in results:
display(tree) # tree.draw() oder print(tree)
analyzed = True
if not analyzed:
print("Keine Analyse möglich", file=sys.stderr)
sys.stderr.flush()
pos_sentences = [
"der Mann schläft",
"der schöne Mann schläft",
"der Mann gibt der Frau das Buch"
]
neg_sentences = ["das Mann schläft", "das schöne Mann schläft"]
test_grammar(grammar, neg_sentences)
test_grammar(grammar, pos_sentences)
grammar = """
S -> NP[KAS=nom] VP
NP[KAS=?y] -> DET[GEN=?x,KAS=?y] NOM[GEN=?x]
NOM[GEN=?x] -> ADJ NOM[GEN=?x] | N[GEN=?x]
ADJ -> "schöne" | "kluge" | "dicke"
DET[GEN=mask,KAS=nom] -> "der"
DET[GEN=fem,KAS=dat] -> "der"
DET[GEN=fem,KAS=nom] -> "die"
DET[GEN=fem,KAS=akk] -> "die"
DET[GEN=neut,KAS=nom] -> "das"
DET[GEN=neut,KAS=akk] -> "das"
N[GEN=mask] -> "Mann"
N[GEN=fem] -> "Frau"
N[GEN=neut] -> "Buch"
VP -> V[SUBCAT=ditr, VAL1=?x, VAL2=?y] NP[KAS=?x] NP[KAS=?y]
VP -> V[VAL=?x,SUBCAT=tr] NP[KAS=?x]
VP -> V[SUBCAT=intr]
V[SUBCAT=ditr, VAL1=dat, VAL2=akk] -> "gibt" | "schenkt"
V[SUBCAT=intr] -> "schläft"
V[SUBCAT=tr,VAL=dat] -> "gefällt"
V[SUBCAT=tr,VAL=akk] -> "kennt"
"""
pos_sentences.extend([
"das Buch gefällt der Frau",
"das Buch kennt die Frau"
])
neg_sentences.extend([
"der Mann schläft das Buch",
"die Frau gefällt das Buch",
"das Buch kennt",
"die Frau gibt das Buch",
"die Frau gibt die Frau das Buch"
])
test_grammar(grammar, pos_sentences)
test_grammar(grammar, neg_sentences)
grammar = """
BITTE NACH BEARBEITUNG VON (2) VON OBEN KOPIEREN
"""
pos_sentences.extend([
"die Männer geben der Frau das Buch",
"die Bücher gefallen der Frau",
"die Frauen schlafen"
])
neg_sentences.extend([
"der Mann geben der Frau das Buch",
"das Buch gefällt der Frauen",
"die Frauen schläft"
])
pos_sentences.extend([
"heute gibt der Mann der Frau das Buch",
"der Mann gibt heute der Frau das Buch",
"der Mann gibt der Frau heute das Buch",
"der Mann gibt der Frau das Buch heute"
])
neg_sentences.extend([
"heute der Mann gibt der Frau das Buch"
])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Make an edit object
Step2: 3. Make the variant
Step3: Important
|
<ASSISTANT_TASK:>
Python Code:
import hgvs.location
import hgvs.posedit
start = hgvs.location.BaseOffsetPosition(base=200,offset=-6,datum=hgvs.location.Datum.CDS_START)
start, str(start)
end = hgvs.location.BaseOffsetPosition(base=22,datum=hgvs.location.Datum.CDS_END)
end, str(end)
iv = hgvs.location.Interval(start=start,end=end)
iv, str(iv)
import hgvs.edit, hgvs.posedit
edit = hgvs.edit.NARefAlt(ref='A',alt='T')
edit, str(edit)
posedit = hgvs.posedit.PosEdit(pos=iv,edit=edit)
posedit, str(posedit)
import hgvs.sequencevariant
var = hgvs.sequencevariant.SequenceVariant(ac='NM_01234.5', type='c', posedit=posedit)
var, str(var)
import copy
var2 = copy.deepcopy(var)
var2.posedit.pos.start.base=456
str(var2)
var2 = copy.deepcopy(var)
var2.posedit.edit.alt='CT'
str(var2)
var2 = copy.deepcopy(var)
str(var2)
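# Sketch (added for illustration, not in the original): why deepcopy matters --
# plain assignment shares the nested position/edit objects by reference.
var_alias = var                     # NOT a copy; same underlying objects
var_copy = copy.deepcopy(var)       # fully independent copy
var_copy.posedit.pos.start.base = 999
str(var), str(var_alias), str(var_copy)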
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then load some data.
Step2: Benchmark classifier following ml-benchmarks
|
<ASSISTANT_TASK:>
Python Code:
import numpy; print('numpy:\t', numpy.__version__, sep='\t')
import scipy; print('scipy:\t', scipy.__version__, sep='\t')
import matplotlib; print('matplotlib:', matplotlib.__version__, sep='\t')
import sklearn; print('scikit-learn:', sklearn.__version__, sep='\t')
from sklearn import datasets
#datasets.load_ -> [press tab for completion]
iris = datasets.load_iris()
iris.keys()
for k in iris.keys():
print('\n== ', k, '==\n', str(iris[k])[0:390])
for k in iris.keys():
print(k, ':', type(iris[k]))
[(k, iris[k].shape) for k in iris.keys() if type(iris[k]) == numpy.ndarray]
# note: this also imports numpy as np, imports matplotlib.pyplot as plt, and others
%pylab inline
def dtime_to_seconds(dtime):
return dtime.seconds + (dtime.microseconds * 1e-6)
def bench(func, data, n=10):
assert n > 2
score = np.inf
try:
time = []
for i in range(n):
score, t = func(*data)
time.append(dtime_to_seconds(t))
# remove extremal values
time.pop(np.argmax(time))
time.pop(np.argmin(time))
except Exception as detail:
        print('%s error in function %s' % (repr(detail), func))
time = []
return score, np.array(time)
def bench_skl(X, y, T, valid):
from sklearn import linear_model, ensemble
start = datetime.now()
# http://scikit-learn.org/stable/modules/classes.html
clf = ensemble.RandomForestClassifier(n_estimators=1000, n_jobs=5, verbose=0)
#clf = linear_model.ElasticNet(alpha=0.5, l1_ratio=0.5)
#clf = linear_model.LogisticRegression()
#clf = neighbors.NeighborsClassifier(n_neighbors=n_neighbors, algorithm='brute_inplace')
#clf = skl_cluster.KMeans(k=n_components, n_init=1)
#...
clf.fit(X, y)
## Regression
# pred = clf.predict(T)
# delta = datetime.now() - start
# mse = np.linalg.norm(pred - valid, 2) ** 2
# return mse, delta
# Classification
score = np.mean(clf.predict(T) == valid)
return score, datetime.now() - start
from sklearn import datasets
import numpy as np
from datetime import datetime
iris = datasets.load_iris()
sample_range = np.random.random_sample(size=iris.target.shape[0])
TH = 0.7
X = np.array([(iris.data[i,]) for i in range(len(iris.target)) if sample_range[i] >= TH])
Y = np.array([(iris.target[i,]) for i in range(len(iris.target)) if sample_range[i] >= TH])
T = np.array([(iris.data[i,]) for i in range(len(iris.target)) if sample_range[i] < TH])
valid = np.array([(iris.target[i,]) for i in range(len(iris.target)) if sample_range[i] < TH])
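# An equivalent, more idiomatic split using boolean masks (sketch only, not
# part of the original benchmark):
#   mask = sample_range >= TH
#   X, Y = iris.data[mask], iris.target[mask]
#   T, valid = iris.data[~mask], iris.target[~mask]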
num_tries = 25
score, times = bench(bench_skl, (X,Y,T,valid), num_tries)
print('Tries:', num_tries, 'Score:', score, 'Time:', np.mean(times), '(mean)', np.median(times), '(median)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Show topography for two different conditions
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks='meg', baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
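# Note: because the event names contain '/', epochs['left'] pools both
# 'audio/left' and 'visual/left'; MNE selects epochs by tag membership.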
colors = 'blue', 'red'
title = 'MNE sample data\nleft vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title, background_color='w')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: We want to solve the following
Step3: Let's try evaluating this on some sinusoidal data, with a frequency of 10 cycles per unit time
Step4: As expected, the NFFT shows strong features at wave numbers $\pm 10$.
Step6: The expanded algorithm
Step8: Speedup #1
Step9: Speedup #2
Step11: By design, each row of the matrix contains just a single nonzero clump of entries, of width approximately $2m$.
Step12: Choosing m
Step15: Let's add this to our nfft function
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def ndft(x, f, N):
    """non-equispaced discrete Fourier transform"""
k = -(N // 2) + np.arange(N)
return np.dot(f, np.exp(2j * np.pi * k * x[:, np.newaxis]))
x = -0.5 + np.random.rand(1000)
f = np.sin(10 * 2 * np.pi * x)
k = -20 + np.arange(40)
f_k = ndft(x, f, len(k))
plt.plot(k, f_k.real, label='real')
plt.plot(k, f_k.imag, label='imag')
plt.legend()
# equations C.1 from https://www-user.tu-chemnitz.de/~potts/paper/nfft3.pdf
def phi(x, n, m, sigma):
b = (2 * sigma * m) / ((2 * sigma - 1) * np.pi)
return np.exp(-(n * x) ** 2 / b) / np.sqrt(np.pi * b)
def phi_hat(k, n, m, sigma):
b = (2 * sigma * m) / ((2 * sigma - 1) * np.pi)
return np.exp(-b * (np.pi * k / n) ** 2)
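# phi is the (truncated) Gaussian window used to smear samples onto the grid,
# and phi_hat is its analytic Fourier transform; the next cell checks
# numerically that FFT(phi) matches phi_hat.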
from numpy.fft import fft, fftshift, ifftshift
N = 1000
sigma = 1
n = N * sigma
m = 20
# compute phi(x)
x = np.linspace(-0.5, 0.5, N, endpoint=False)
f = phi(x, n, m, sigma)
# compute phi_hat(k)
k = -(N // 2) + np.arange(N)
f_hat = phi_hat(k, n, m, sigma)
# compute the FFT of phi(x)
f_fft = fftshift(fft(ifftshift(f)))
# assure they match
np.allclose(f_fft, f_hat)
import numpy as np
def nfft1(x, f, N, sigma=2):
    """Alg 3 from https://www-user.tu-chemnitz.de/~potts/paper/nfft3.pdf"""
n = N * sigma # size of oversampled grid
m = 20 # magic number: we'll set this more carefully later
# 1. Express f(x) in terms of basis functions phi
shift_to_range = lambda x: -0.5 + (x + 0.5) % 1
x_grid = np.linspace(-0.5, 0.5, n, endpoint=False)
g = np.dot(f, phi(shift_to_range(x[:, None] - x_grid), n, m, sigma))
# 2. Compute the Fourier transform of g on the oversampled grid
k = -(N // 2) + np.arange(N)
g_k = np.dot(g, np.exp(2j * np.pi * k * x_grid[:, None]))
# 3. Divide by the Fourier transform of the convolution kernel
f_k = g_k / phi_hat(k, n, m, sigma)
return f_k
x = -0.5 + np.random.rand(1000)
f = np.sin(10 * 2 * np.pi * x)
N = 100
np.allclose(ndft(x, f, N),
nfft1(x, f, N))
import numpy as np
from numpy.fft import fft, ifft, fftshift, ifftshift
def nfft2(x, f, N, sigma=2):
    """Alg 3 from https://www-user.tu-chemnitz.de/~potts/paper/nfft3.pdf"""
n = N * sigma # size of oversampled grid
m = 20 # magic number: we'll set this more carefully later
# 1. Express f(x) in terms of basis functions phi
shift_to_range = lambda x: -0.5 + (x + 0.5) % 1
x_grid = np.linspace(-0.5, 0.5, n, endpoint=False)
g = np.dot(f, phi(shift_to_range(x[:, None] - x_grid), n, m, sigma))
# 2. Compute the Fourier transform of g on the oversampled grid
k = -(N // 2) + np.arange(N)
g_k_n = fftshift(ifft(ifftshift(g)))
g_k = n * g_k_n[(n - N) // 2: (n + N) // 2]
# 3. Divide by the Fourier transform of the convolution kernel
f_k = g_k / phi_hat(k, n, m, sigma)
return f_k
x = -0.5 + np.random.rand(1000)
f = np.sin(10 * 2 * np.pi * x)
N = 100
np.allclose(ndft(x, f, N),
nfft2(x, f, N))
sigma = 3
n = sigma * N
m = 20
x_grid = np.linspace(-0.5, 0.5, n, endpoint=False)
shift_to_range = lambda x: -0.5 + (x + 0.5) % 1
mat = phi(shift_to_range(x[:, None] - x_grid), n, m, sigma)
plt.imshow(mat, aspect='auto')
plt.colorbar()
from scipy.sparse import csr_matrix
col_ind = np.floor(n * x[:, np.newaxis]).astype(int) + np.arange(-m, m)
vals = phi(shift_to_range(x[:, None] - col_ind / n), n, m, sigma)
col_ind = (col_ind + n // 2) % n
row_ptr = np.arange(len(x) + 1) * col_ind.shape[1]
spmat = csr_matrix((vals.ravel(), col_ind.ravel(), row_ptr), shape=(len(x), n))
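# csr_matrix is built here from its raw (data, indices, indptr) triple: row i
# of the matrix uses entries indptr[i]:indptr[i+1] of the value and
# column-index arrays, which is exactly the "one clump of ~2m entries per row"
# structure described above.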
plt.imshow(spmat.toarray(), aspect='auto')
plt.colorbar()
np.allclose(spmat.toarray(), mat)
import numpy as np
from numpy.fft import fft, ifft, fftshift, ifftshift
def nfft3(x, f, N, sigma=2):
    """Alg 3 from https://www-user.tu-chemnitz.de/~potts/paper/nfft3.pdf"""
n = N * sigma # size of oversampled grid
m = 20 # magic number: we'll set this more carefully later
# 1. Express f(x) in terms of basis functions phi
shift_to_range = lambda x: -0.5 + (x + 0.5) % 1
col_ind = np.floor(n * x[:, np.newaxis]).astype(int) + np.arange(-m, m)
vals = phi(shift_to_range(x[:, None] - col_ind / n), n, m, sigma)
col_ind = (col_ind + n // 2) % n
row_ptr = np.arange(len(x) + 1) * col_ind.shape[1]
mat = csr_matrix((vals.ravel(), col_ind.ravel(), row_ptr), shape=(len(x), n))
g = mat.T.dot(f)
# 2. Compute the Fourier transform of g on the oversampled grid
k = -(N // 2) + np.arange(N)
g_k_n = fftshift(ifft(ifftshift(g)))
g_k = n * g_k_n[(n - N) // 2: (n + N) // 2]
# 3. Divide by the Fourier transform of the convolution kernel
f_k = g_k / phi_hat(k, n, m, sigma)
return f_k
x = -0.5 + np.random.rand(1000)
f = np.sin(10 * 2 * np.pi * x)
N = 100
np.allclose(ndft(x, f, N),
nfft3(x, f, N))
def C_phi(m, sigma):
return 4 * np.exp(-m * np.pi * (1 - 1. / (2 * sigma - 1)))
def m_from_C_phi(C, sigma):
return np.ceil(-np.log(0.25 * C) / (np.pi * (1 - 1 / (2 * sigma - 1))))
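# Worked example (illustrative numbers only): for tol=1e-8, N=100, sigma=2 we
# get C = 1e-10, so m = ceil(24.4 / 2.09) = 12 grid points on each side.
# m_from_C_phi(1e-8 / 100, 2)   # -> 12.0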
import numpy as np
from numpy.fft import fft, ifft, fftshift, ifftshift
def nfft(x, f, N, sigma=2, tol=1E-8):
    """Alg 3 from https://www-user.tu-chemnitz.de/~potts/paper/nfft3.pdf"""
n = N * sigma # size of oversampled grid
    m = int(m_from_C_phi(tol / N, sigma))
# 1. Express f(x) in terms of basis functions phi
shift_to_range = lambda x: -0.5 + (x + 0.5) % 1
col_ind = np.floor(n * x[:, np.newaxis]).astype(int) + np.arange(-m, m)
vals = phi(shift_to_range(x[:, None] - col_ind / n), n, m, sigma)
col_ind = (col_ind + n // 2) % n
indptr = np.arange(len(x) + 1) * col_ind.shape[1]
mat = csr_matrix((vals.ravel(), col_ind.ravel(), indptr), shape=(len(x), n))
g = mat.T.dot(f)
# 2. Compute the Fourier transform of g on the oversampled grid
k = -(N // 2) + np.arange(N)
g_k_n = fftshift(ifft(ifftshift(g)))
g_k = n * g_k_n[(n - N) // 2: (n + N) // 2]
# 3. Divide by the Fourier transform of the convolution kernel
f_k = g_k / phi_hat(k, n, m, sigma)
return f_k
x = -0.5 + np.random.rand(1000)
f = np.sin(10 * 2 * np.pi * x)
N = 100
np.allclose(ndft(x, f, N),
nfft(x, f, N))
from pynfft import NFFT
def cnfft(x, f, N):
    """Compute the nfft with pynfft"""
plan = NFFT(N, len(x))
plan.x = x
plan.precompute()
plan.f = f
# need to return a copy because of a
# reference counting bug in pynfft
return plan.adjoint().copy()
np.allclose(cnfft(x, f, N),
nfft(x, f, N))
x = -0.5 + np.random.rand(10000)
f = np.sin(10 * 2 * np.pi * x)
N = 10000
#print("direct ndft:")
#%timeit ndft(x, f, N)
#print()
print("fast nfft:")
%timeit nfft(x, f, N)
print()
print("wrapped C-nfft/pynfft package:")
%timeit cnfft(x, f, N)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Stochastic gradient descent (SGD)
Step2: We now write an SGD code for this problem. The training_data is a list of tuples (x, y) representing the training inputs and corresponding desired outputs. The variables epochs and mini_batch_size are what you'd expect - the number of epochs to train for, and the size of the mini-batches to use when sampling. eta is the learning rate, $\eta$. If the optional argument test_data is supplied, then the program will evaluate the network after each epoch of training, and print out partial progress. This is useful for tracking progress, but slows things down substantially.
Step3: Challenge 14.2
Step6: You will need the following auxiliary functions
Step11: A simple network to classify handwritten digits
Step22: Note also that the biases and weights are stored as lists of Numpy matrices. So, for example net.weights[1] is a Numpy matrix storing the weights connecting the second and third layers of neurons. (It's not the first and second layers, since Python's list indexing starts at 0.) Since net.weights[1] is rather verbose, let's just denote that matrix $w$
Step23: We first load the MNIST data
Step24: After loading the MNIST data, we'll set up a Network with 30 hidden neurons.
Step25: Finally, we'll use stochastic gradient descent to learn from the MNIST training_data over 30 epochs, with a mini-batch size of 10, and a learning rate of $\eta$=3.0
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot
pyplot.rcParams['image.cmap'] = 'jet'
import numpy as np
x0 = -1.4
y0 = 0.5
x = [x0] # The algorithm starts at x0, y0
y = [y0]
eta = 0.1 # step size multiplier
precision = 0.00001
def f(x,y):
f1 = x**2/2-y**2/4+3
f2 = 2*x+1-np.exp(y)
return np.sin(f1)*np.cos(f2)
def gradf(x,y):
f1 = x**2/2-y**2/4+3
f2 = 2*x+1-np.exp(y)
dx = np.cos(f1)*np.cos(f2)*x-np.sin(f1)*np.sin(f2)*2.
dy = np.cos(f1)*np.cos(f2)*(-y/2.)-np.sin(f1)*np.sin(f2)*(-np.exp(y))
return (dx,dy)
err = 100.
while err > precision:
(step_x, step_y) = gradf(x0, y0)
x0 -= eta*step_x
y0 -= eta*step_y
x.append(x0)
y.append(y0)
err = eta*(abs(step_x)+abs(step_y))
print(x0,y0)
#### All this below is just to visualize the process
dx = 0.05
dy = 0.05
xx = np.arange(-1.5, 1.+dx, dx)
yy = np.arange(0., 2.+dy, dy)
V = np.zeros(shape=(len(yy),len(xx)))
for iy in range(0,len(yy)):
for ix in range(0,len(xx)):
V[iy,ix] = f(xx[ix],yy[iy])
X, Y = np.meshgrid(xx, yy)
pyplot.contour(X, Y, V)
#pyplot.plot(x,y,linestyle='--', lw=3);
pyplot.scatter(x,y);
pyplot.ylabel("y")
pyplot.xlabel("x");
%matplotlib inline
from matplotlib import pyplot
import numpy as np
a = 1
b = 2
num_points = 100
np.random.seed(637163) # we make sure we always generate the same sequence
x_data = np.random.rand(num_points)*20.
y_data = x_data*b+a+3*(2.*np.random.rand(num_points)-1)
pyplot.scatter(x_data,y_data)
pyplot.plot(x_data, b*x_data+a)
#### Least squares fit
sum_x = np.sum(x_data)
sum_y = np.sum(y_data)
sum_x2 = np.sum(x_data**2)
sum_xy = np.sum(x_data*y_data)
det = num_points*sum_x2-sum_x**2
fit_a = (sum_y*sum_x2-sum_x*sum_xy)/det
fit_b = (num_points*sum_xy-sum_x*sum_y)/det
print(fit_a,fit_b)
pyplot.xlim(-1,22)
pyplot.ylim(-1,24)
pyplot.plot(x_data, fit_b*x_data+fit_a);
epochs = 1000
mini_batch_size = 10
eta = 0.01/mini_batch_size
a = 3.
b = 3.
def update_mini_batch(mini_batch, eta):
global a, b
a0 = a
b0 = b
    for x, y in mini_batch:
e = eta*(a0+b0*x-y)
a -= e
b -= x*e
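# Where the update above comes from: for a single point the cost is
# C = (a + b*x - y)**2 / 2, so dC/da = (a + b*x - y) and dC/db = x*(a + b*x - y);
# the loop steps both parameters against these gradients, scaled by eta.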
training_data = list(zip(x_data,y_data))
for j in range(epochs):
np.random.shuffle(training_data)
mini_batches = [training_data[k:k+mini_batch_size]
for k in range(0, len(training_data), mini_batch_size)]
for mini_batch in mini_batches:
update_mini_batch(mini_batch, eta)
print ("Epoch {0}: {1} {2}".format(j,a,b))
### We provide a set of randomly generated training points
num_points = 100
w1 = -2.5
w2 = 1.5
w0 = 3.
np.random.seed(637163) # we make sure we always generate the same sequence
x_data = np.random.rand(num_points)*10.
y_data = np.random.rand(num_points)*10.
z_data = np.zeros(num_points)
for i in range(len(z_data)):
if (y_data[i] > (-w0-w1*x_data[i])/w2):
z_data[i] = 1.
pyplot.scatter(x_data,y_data,c=z_data,marker='o',linewidth=1.5,edgecolors='black')
pyplot.plot(x_data,(-w1*x_data-w0)/w2)
pyplot.gray()
pyplot.xlim(0,10)
pyplot.ylim(0,10);
def sigmoid(z):
    """The sigmoid function."""
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
return sigmoid(z)*(1-sigmoid(z))
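# Why sigmoid_prime has this form: with s(z) = 1/(1 + e^-z),
#   ds/dz = e^-z / (1 + e^-z)**2 = s(z) * (1 - s(z)),
# which is what the function above returns.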
"""mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data. For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""
#### Libraries
# Standard library
import pickle
import gzip
# Third-party libraries
import numpy as np
def load_data():
    """Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
    below.
    """
f = gzip.open('data/mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = pickle.load(f, encoding='latin1')
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
    """Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
In particular, ``training_data`` is a list containing 50,000
2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray
containing the input image. ``y`` is a 10-dimensional
numpy.ndarray representing the unit vector corresponding to the
correct digit for ``x``.
``validation_data`` and ``test_data`` are lists containing 10,000
2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional
numpy.ndarry containing the input image, and ``y`` is the
corresponding classification, i.e., the digit values (integers)
corresponding to ``x``.
Obviously, this means we're using slightly different formats for
the training data and the validation / test data. These formats
turn out to be the most convenient for use in our neural network
    code.
    """
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = list(zip(training_inputs, training_results))
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = list(zip(validation_inputs, va_d[1]))
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = list(zip(test_inputs, te_d[1]))
return (training_data, validation_data, test_data)
def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the jth
    position and zeroes elsewhere. This is used to convert a digit
    (0...9) into a corresponding desired output from the neural
    network."""
e = np.zeros((10, 1))
e[j] = 1.0
return e
"""network.py
~~~~~~~~~~
A module to implement the stochastic gradient descent learning
algorithm for a feedforward neural network. Gradients are calculated
using backpropagation. Note that I have focused on making the code
simple, easily readable, and easily modifiable. It is not optimized,
and omits many desirable features.
"""
#### Libraries
# Standard library
import random
# Third-party libraries
import numpy as np
class Network(object):
def __init__(self, sizes):
        """The list ``sizes`` contains the number of neurons in the
respective layers of the network. For example, if the list
was [2, 3, 1] then it would be a three-layer network, with the
first layer containing 2 neurons, the second layer 3 neurons,
and the third layer 1 neuron. The biases and weights for the
network are initialized randomly, using a Gaussian
distribution with mean 0, and variance 1. Note that the first
layer is assumed to be an input layer, and by convention we
won't set any biases for those neurons, since biases are only
        ever used in computing the outputs from later layers.
        """
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(sizes[:-1], sizes[1:])]
def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
test_data=None):
        """Train the neural network using mini-batch stochastic
gradient descent. The ``training_data`` is a list of tuples
``(x, y)`` representing the training inputs and the desired
outputs. The other non-optional parameters are
self-explanatory. If ``test_data`` is provided then the
network will be evaluated against the test data after each
epoch, and partial progress printed out. This is useful for
        tracking progress, but slows things down substantially.
        """
if test_data: n_test = len(test_data)
n = len(training_data)
for j in range(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in range(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
print ("Epoch {0}: {1} / {2}".format(
j, self.evaluate(test_data), n_test))
else:
print ("Epoch {0} complete".format(j))
def update_mini_batch(self, mini_batch, eta):
        """Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
        is the learning rate.
        """
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``.
        """
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in range(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def evaluate(self, test_data):
        """Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
        neuron in the final layer has the highest activation.
        """
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives \partial C_x /
        \partial a for the output activations."""
return (output_activations-y)
#### Miscellaneous functions
def sigmoid(z):
    """The sigmoid function."""
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
return sigmoid(z)*(1-sigmoid(z))
training_data, validation_data, test_data = load_data_wrapper()
net = Network([784, 30, 10])
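# Quick shape check (illustrative): with sizes [784, 30, 10], weights[0] is
# 30x784 (input -> hidden) and weights[1] is 10x30 (hidden -> output).
# net.weights[1].shape   # -> (10, 30)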
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lamé parameters
Step2: Metric tensor
Step3: ${\displaystyle \hat{G}=\sum_{i,j} g_{ij}\vec{R}^i\vec{R}^j}$
Step4: Christoffel symbols
Step5: Gradient of vector
Step6: Physical coordinates
Step7: Strain tensor
Step8: Virtual work
Step9: Timoshenko theory
Step10: Square theory
Step11: Mass matrix
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
from geom_util import *
from sympy.vector import CoordSys3D
N = CoordSys3D('N')
alpha1, alpha2, alpha3 = symbols("alpha_1 alpha_2 alpha_3", real = True, positive=True)
init_printing()
%matplotlib inline
%reload_ext autoreload
%autoreload 2
%aimport geom_util
H1=symbols('H1')
H2=S(1)
H3=S(1)
H=[H1, H2, H3]
DIM=3
dH = zeros(DIM,DIM)
for i in range(DIM):
for j in range(DIM):
if (i == 0 and j != 1):
dH[i,j]=Symbol('H_{{{},{}}}'.format(i+1,j+1))
dH
G_up = getMetricTensorUpLame(H1, H2, H3)
G_down = getMetricTensorDownLame(H1, H2, H3)
DIM=3
G_down_diff = MutableDenseNDimArray.zeros(DIM, DIM, DIM)
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
G_down_diff[i,i,k]=2*H[i]*dH[i,k]
GK = getChristoffelSymbols2(G_up, G_down_diff, (alpha1, alpha2, alpha3))
GK
def row_index_to_i_j_grad(i_row):
return i_row // 3, i_row % 3
B = zeros(9, 12)
B[0,1] = S(1)
B[1,2] = S(1)
B[2,3] = S(1)
B[3,5] = S(1)
B[4,6] = S(1)
B[5,7] = S(1)
B[6,9] = S(1)
B[7,10] = S(1)
B[8,11] = S(1)
for row_index in range(9):
i,j=row_index_to_i_j_grad(row_index)
B[row_index, 0] = -GK[i,j,0]
B[row_index, 4] = -GK[i,j,1]
B[row_index, 8] = -GK[i,j,2]
B
P=zeros(12,12)
P[0,0]=H[0]
P[1,0]=dH[0,0]
P[1,1]=H[0]
P[2,0]=dH[0,1]
P[2,2]=H[0]
P[3,0]=dH[0,2]
P[3,3]=H[0]
P[4,4]=H[1]
P[5,4]=dH[1,0]
P[5,5]=H[1]
P[6,4]=dH[1,1]
P[6,6]=H[1]
P[7,4]=dH[1,2]
P[7,7]=H[1]
P[8,8]=H[2]
P[9,8]=dH[2,0]
P[9,9]=H[2]
P[10,8]=dH[2,1]
P[10,10]=H[2]
P[11,8]=dH[2,2]
P[11,11]=H[2]
P=simplify(P)
P
B_P = zeros(9,9)
for i in range(3):
for j in range(3):
row_index = i*3+j
B_P[row_index, row_index] = 1/(H[i]*H[j])
Grad_U_P = simplify(B_P*B*P)
Grad_U_P
E=zeros(6,9)
E[0,0]=1
E[1,4]=1
E[2,8]=1
E[3,1]=1
E[3,3]=1
E[4,2]=1
E[4,6]=1
E[5,5]=1
E[5,7]=1
E
StrainL=simplify(E*Grad_U_P)
StrainL
def E_NonLinear(grad_u):
N = 3
du = zeros(N, N)
# print("===Deformations===")
for i in range(N):
for j in range(N):
index = i*N+j
du[j,i] = grad_u[index]
# print("========")
I = eye(3)
a_values = S(1)/S(2) * du * G_up
E_NL = zeros(6,9)
E_NL[0,0] = a_values[0,0]
E_NL[0,3] = a_values[0,1]
E_NL[0,6] = a_values[0,2]
E_NL[1,1] = a_values[1,0]
E_NL[1,4] = a_values[1,1]
E_NL[1,7] = a_values[1,2]
E_NL[2,2] = a_values[2,0]
E_NL[2,5] = a_values[2,1]
E_NL[2,8] = a_values[2,2]
E_NL[3,1] = 2*a_values[0,0]
E_NL[3,4] = 2*a_values[0,1]
E_NL[3,7] = 2*a_values[0,2]
E_NL[4,0] = 2*a_values[2,0]
E_NL[4,3] = 2*a_values[2,1]
E_NL[4,6] = 2*a_values[2,2]
E_NL[5,2] = 2*a_values[1,0]
E_NL[5,5] = 2*a_values[1,1]
E_NL[5,8] = 2*a_values[1,2]
return E_NL
%aimport geom_util
u=getUHat3DPlane(alpha1, alpha2, alpha3)
# u=getUHatU3Main(alpha1, alpha2, alpha3)
gradu=B*u
E_NL = E_NonLinear(gradu)*B
E_NL
%aimport geom_util
u=getUHatU3MainPlane(alpha1, alpha2, alpha3)
gradup=Grad_U_P*u
# e=E*gradup
# e
E_NLp = E_NonLinear(gradup)*gradup
simplify(E_NLp)
%aimport geom_util
C_tensor = getIsotropicStiffnessTensor()
C = convertStiffnessTensorToMatrix(C_tensor)
C
StrainL.T*C*StrainL*H1
T=zeros(12,6)
T[0,0]=1
T[0,2]=alpha3
T[1,1]=1
T[1,3]=alpha3
T[3,2]=1
T[8,4]=1
T[9,5]=1
T
D_p_T = StrainL*T
simplify(D_p_T)
u = Function("u")
t = Function("theta")
w = Function("w")
u1=u(alpha1)+alpha3*t(alpha1)
u3=w(alpha1)
gu = zeros(12,1)
gu[0] = u1
gu[1] = u1.diff(alpha1)
gu[3] = u1.diff(alpha3)
gu[8] = u3
gu[9] = u3.diff(alpha1)
gradup=Grad_U_P*gu
# E_NLp = E_NonLinear(gradup)*gradup
# simplify(E_NLp)
# gradup=Grad_U_P*gu
# o20=(K*u(alpha1)-w(alpha1).diff(alpha1)+t(alpha1))/2
# o21=K*t(alpha1)
# O=1/2*o20*o20+alpha3*o20*o21-alpha3*K/2*o20*o20
# O=expand(O)
# O=collect(O,alpha3)
# simplify(O)
StrainNL = E_NonLinear(gradup)*gradup
StrainL*gu+simplify(StrainNL)
L=zeros(12,12)
h=Symbol('h')
p0=1/2-alpha3/h
p1=1/2+alpha3/h
p2=1-(2*alpha3/h)**2
L[0,0]=p0
L[0,2]=p1
L[0,4]=p2
L[1,1]=p0
L[1,3]=p1
L[1,5]=p2
L[3,0]=p0.diff(alpha3)
L[3,2]=p1.diff(alpha3)
L[3,4]=p2.diff(alpha3)
L[8,6]=p0
L[8,8]=p1
L[8,10]=p2
L[9,7]=p0
L[9,9]=p1
L[9,11]=p2
L[11,6]=p0.diff(alpha3)
L[11,8]=p1.diff(alpha3)
L[11,10]=p2.diff(alpha3)
L
D_p_L = StrainL*L
simplify(D_p_L)
p0_2=p0*p0
p01=p0*p1
p02=p0*p2
p1_2=p1*p1
p12=p1*p2
p2_2=p2*p2
p0_2i=integrate(p0_2, (alpha3, -h/2, h/2))
p01i=integrate(p01, (alpha3, -h/2, h/2))
p02i=integrate(p02, (alpha3, -h/2, h/2))
p1_2i=integrate(p1_2, (alpha3, -h/2, h/2))
p12i=integrate(p12, (alpha3, -h/2, h/2))
p2_2i=integrate(p2_2, (alpha3, -h/2, h/2))
# p0_2i = simplify(p0_2i)
# p01i = expand(simplify(p01i))
# p02i = expand(simplify(p02i))
# p1_2i = expand(simplify(p1_2i))
# p12i = expand(simplify(p12i))
# p2_2i = expand(simplify(p2_2i))
p0_2i
p01i
p02i
p1_2i
p12i
p2_2i
1/6
Ct=getOrthotropicStiffnessTensor()
C=convertStiffnessTensorToMatrix(Ct)
LC=zeros(6,9)
LC[0,0]=p0
LC[0,1]=p1
LC[0,2]=p2
LC[2,3]=p0
LC[2,4]=p1
LC[2,5]=p2
LC[4,6]=p0
LC[4,7]=p1
LC[4,8]=p2
e = LC.T*C*LC
integrate(e, (alpha3, -h/2, h/2))
rho=Symbol('rho')
B_h=zeros(3,12)
B_h[0,0]=1
B_h[1,4]=1
B_h[2,8]=1
M=simplify(rho*P.T*B_h.T*G_up*B_h*P)
M
M_p = L.T*M*L
integrate(M_p, (alpha3, -h/2, h/2))
omega, t=symbols("\omega, t")
c=cos(omega*t)
c2=cos(omega*t)*cos(omega*t)
c3=cos(omega*t)*cos(omega*t)*cos(omega*t)
T=2*pi/omega
# omega*T/4
integrate(c, (t, 0, T/4))/T
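# By hand: the integral of cos(omega*t) over [0, T/4] is sin(pi/2)/omega = 1/omega,
# and dividing by T = 2*pi/omega gives 1/(2*pi), which is what this cell should return.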
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: Define a function for modeling and cross-validation
Step3: Baseline Model
Step4: GBM Models
Step5: So we got 60 as the optimal number of estimators for the 0.1 learning rate. Note that 60 is a reasonable value and can be used as is, but it might not be the same in all cases. Other situations
Step6: Since we reached the maximum of the min_samples_split range, we should check higher values as well. Also, we can now tune min_samples_leaf along with it, since max_depth is fixed. One might argue that the optimal max_depth could change for higher values, but if you observe the output closely, a max_depth of 9 gave a better model in most cases.
Step7: Tune max_features
Step8: Step3- Tune Subsample and Lower Learning Rate
Step9: With all parameters tuned, let's try reducing the learning rate and proportionally increasing the number of estimators to get more robust results.
Step10: 1/10th learning rate
Step11: 1/50th learning rate
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import cross_validation, metrics
from sklearn.grid_search import GridSearchCV
import matplotlib.pylab as plt
%matplotlib inline
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 12, 4
train = pd.read_csv('train_modified.csv')
target='Disbursed'
IDcol = 'ID'
train['Disbursed'].value_counts()
def modelfit(alg, dtrain, dtest, predictors, performCV=True, printFeatureImportance=True, cv_folds=5):
#Fit the algorithm on the data
alg.fit(dtrain[predictors], dtrain['Disbursed'])
#Predict training set:
dtrain_predictions = alg.predict(dtrain[predictors])
dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]
#Perform cross-validation:
if performCV:
cv_score = cross_validation.cross_val_score(alg, dtrain[predictors], dtrain['Disbursed'], cv=cv_folds, scoring='roc_auc')
#Print model report:
print "\nModel Report"
print "Accuracy : %.4g" % metrics.accuracy_score(dtrain['Disbursed'].values, dtrain_predictions)
print "AUC Score (Train): %f" % metrics.roc_auc_score(dtrain['Disbursed'], dtrain_predprob)
if performCV:
print "CV Score : Mean - %.7g | Std - %.7g | Min - %.7g | Max - %.7g" % (np.mean(cv_score),np.std(cv_score),np.min(cv_score),np.max(cv_score))
#Print Feature Importance:
if printFeatureImportance:
feat_imp = pd.Series(alg.feature_importances_, predictors).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
#Choose all predictors except target & IDcols
predictors = [x for x in train.columns if x not in [target, IDcol]]
gbm0 = GradientBoostingClassifier(random_state=10)
modelfit(gbm0, train, test, predictors)
#Choose all predictors except target & IDcols
predictors = [x for x in train.columns if x not in [target, IDcol]]
param_test1 = {'n_estimators':range(20,81,10)}
gsearch1 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, min_samples_split=500,
min_samples_leaf=50,max_depth=8,max_features='sqrt', subsample=0.8,random_state=10),
param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch1.fit(train[predictors],train[target])
gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_
#Grid seach on subsample and max_features
param_test2 = {'max_depth':range(5,16,2), 'min_samples_split':range(200,1001,200)}
gsearch2 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=60,
max_features='sqrt', subsample=0.8, random_state=10),
param_grid = param_test2, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch2.fit(train[predictors],train[target])
gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_
#Grid seach on subsample and max_features
param_test3 = {'min_samples_split':range(1000,2100,200), 'min_samples_leaf':range(30,71,10)}
gsearch3 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=60,max_depth=9,
max_features='sqrt', subsample=0.8, random_state=10),
param_grid = param_test3, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch3.fit(train[predictors],train[target])
gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_
modelfit(gsearch3.best_estimator_, train, test, predictors)
#Grid seach on subsample and max_features
param_test4 = {'max_features':range(7,20,2)}
gsearch4 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=60,max_depth=9,
min_samples_split=1200, min_samples_leaf=60, subsample=0.8, random_state=10),
param_grid = param_test4, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch4.fit(train[predictors],train[target])
gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_
#Grid seach on subsample and max_features
param_test5 = {'subsample':[0.6,0.7,0.75,0.8,0.85,0.9]}
gsearch5 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=60,max_depth=9,
min_samples_split=1200, min_samples_leaf=60, subsample=0.8, random_state=10, max_features=7),
param_grid = param_test5, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch5.fit(train[predictors],train[target])
gsearch5.grid_scores_, gsearch5.best_params_, gsearch5.best_score_
#Choose all predictors except target & IDcols
predictors = [x for x in train.columns if x not in [target, IDcol]]
gbm_tuned_1 = GradientBoostingClassifier(learning_rate=0.05, n_estimators=120,max_depth=9, min_samples_split=1200,
min_samples_leaf=60, subsample=0.85, random_state=10, max_features=7)
modelfit(gbm_tuned_1, train, test, predictors)
#Choose all predictors except target & IDcols
predictors = [x for x in train.columns if x not in [target, IDcol]]
gbm_tuned_2 = GradientBoostingClassifier(learning_rate=0.01, n_estimators=600,max_depth=9, min_samples_split=1200,
min_samples_leaf=60, subsample=0.85, random_state=10, max_features=7)
modelfit(gbm_tuned_2, train, test, predictors)
#Choose all predictors except target & IDcols
predictors = [x for x in train.columns if x not in [target, IDcol]]
gbm_tuned_3 = GradientBoostingClassifier(learning_rate=0.005, n_estimators=1200,max_depth=9, min_samples_split=1200,
min_samples_leaf=60, subsample=0.85, random_state=10, max_features=7,
warm_start=True)
modelfit(gbm_tuned_3, train, test, predictors, performCV=False)
#Choose all predictors except target & IDcols
predictors = [x for x in train.columns if x not in [target, IDcol]]
gbm_tuned_4 = GradientBoostingClassifier(learning_rate=0.005, n_estimators=1500,max_depth=9, min_samples_split=1200,
min_samples_leaf=60, subsample=0.85, random_state=10, max_features=7,
warm_start=True)
modelfit(gbm_tuned_4, train, test, predictors, performCV=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Randomness
Step2: jnp vs. np
Step3: grad()
Step4: vmap()
Step5: jit()
Step6: pmap()
Step7: pytrees
|
<ASSISTANT_TASK:>
Python Code:
import jax
import jax.numpy as jnp
import numpy as np
from matplotlib import pyplot as plt
# Check connected accelerators. Depending on what runtime you're connected to,
# this will show a single CPU/GPU, or 8 TPU cores (jf_2x2 aka JellyDonut).
# You can start a TPU runtime via : "Connect to a runtime" -> "Start" ->
# "Borg Runtime" -> "Brain Frameworks JellyDonut (go/ml-colab)"
# https://screenshot.googleplex.com/87HTCpQNhBKUZUp
# See also http://go/research-workflow-intro-deck#colab
jax.devices()
# Local devices: In this case it's the same as all devices, but if you run JAX
# in a multi host setup, then local_devices will only show the devices connected
# to the host running the program.
jax.local_devices()
# Alternatively, you can also connect to GPU runtime.
!nvidia-smi
# YOUR ACTION REQUIRED:
# Your task is to use JAX to generate 5 uniform random numbers and 5 normally
# distributed random numbers.
# Check out the following JAX API calls:
# - jax.random.PRNGKey()
# - jax.random.split()
# - jax.random.uniform()
# - jax.random.normal()
##-snip
# Create initial key from seed.
key = jax.random.PRNGKey(0)
# Derive two keys for consumption and another key to continue the chain.
key1, key2, key = jax.random.split(key, 3)
# Generate some random numbers using the first key.
print(jax.random.uniform(key1, shape=(5,)))
# Generate some random numbers using the second key.
print(jax.random.normal(key2, shape=(5,)))
# Note: using the same key for both random numbers would be a mistake, because
# then both vectors would use the same key and there are no guarantees with
# respect to their independence.
# Let's do some semi-serious matrix multiplication:
k = 3_000
x = np.random.normal(size=[k, k])
# ~3.4s
%time x @ x
# YOUR ACTION REQUIRED: Do the same computation using JAX!
# You should use result.block_until_ready() for a fair comparison.
##-snip
x = jax.random.normal(jax.random.PRNGKey(0), [k, k])
# 201 ms on TPU
%time (x@x).block_until_ready()
# Note the different class of the JAX array. There is additional API e.g. to
# determine on which device the data is stored, check out x.device_buffer
##-snip
x.device_buffer.device()
# Combining jnp & np : Below array initialization is rather slow because we
# create a lot of jnp array. Replace jnp with np and observe the speedup!
%%time
# GPU : 1.79s
# CPU : 1.04s
x = jnp.array([jnp.arange(100) for _ in range(10000)])
print(repr(x))
# YOUR ACTION REQUIRED:
# In this situation we would want to create the array in np and then convert it
# to a jnp array using jnp.array() or jax.device_put().
# (Note that we could use np.tile() here, but that's not the point)
##-snip
# GPU : 0.03s
# CPU : 0.03s
x = np.array([np.arange(100) for _ in range(10000)])
print(repr(x))
# GPU : 0.03s
# CPU : 0.03s
x = jnp.array(np.array([np.arange(100) for _ in range(10000)]))
print(repr(x))
# GPU : 0.03s
# CPU : 0.03s
x = jax.device_put(np.array([np.arange(100) for _ in range(10000)]))
print(repr(x))
def sigmoid(x):
return 0.5 * (1 + jnp.tanh(x))
# YOUR ACTION REQUIRED:
# Use grad() to create a new function that computes the gradient of `sigmoid`.
# Verify the output of the new function at some points.
##-snip
sigmoid_grad = jax.grad(sigmoid)
for x in (-10.0, 0.0, 10.0):
print(x, sigmoid_grad(x))
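# Expected values: d/dx 0.5*(1 + tanh(x)) = 0.5*(1 - tanh(x)**2), which is 0.5
# at x = 0 and essentially 0 at x = +/-10.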
def f(x, y):
return 2 * x * y**2
# YOUR ACTION REQUIRED:
# Compute df/dx and df/dy with grad()
##-snip
(
jax.grad(f)(2.0, 3.0), # argnums=0 is the default
jax.grad(f, argnums=1)(
2.0, 3.0
), # specify to take gradient wrt 2nd argument
jax.grad(lambda y, x: f(x, y))(2.0, 3.0), # same result using a lambda
)
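# By hand: f = 2*x*y**2, so df/dx = 2*y**2 = 18.0 and df/dy = 4*x*y = 24.0 at
# (x, y) = (2.0, 3.0); the tuple above should evaluate to roughly (18.0, 24.0, 24.0).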
# Now let's plot the gradient of the sigmoid function in the range [-5, 5]
xs = jnp.linspace(-5, 5, 100)
# We can of course evaluate the gradient at every position separately:
grads = [jax.grad(sigmoid)(x) for x in xs]
plt.plot(xs, grads)
# But JAX can "vectorize" our gradient function for us automatically.
# YOUR ACTION REQUIRED:
# Read the documentation about `vmap` and reimplement the plot without a Python
# loop.
##-snip
plt.plot(xs, jax.vmap(jax.grad(sigmoid))(xs));
# Another vmap() example : Let's re-implement matmul using vector dot product:
vdp = lambda v1, v2: v1.dot(v2)
# Vector dot product:
vdp(jnp.arange(1, 4), jnp.arange(1, 4))
# Matrix vector product:
mvp = jax.vmap(vdp, in_axes=(0, None), out_axes=0)
# Matrix matrix product:
mmp = jax.vmap(mvp, in_axes=(None, 1), out_axes=1)
# Verify result.
m1 = jnp.arange(12).reshape((3, 4))
m2 = m1.reshape((4, 3))
# In case you were wondering : Since Python 3.5 we have `.__matmul__()` operator
# that happens to use the same character as for decorators (cf. `@jit` below).
mmp(m1, m2) - m1 @ m2
# YOUR ACTION REQUIRED:
# It's curry time!
# Try re-implementing mvp() but this time without using the in_axes=, and
# out_axes=. Instead use lambda expressions to (un)curry the arguments in such
# a way that vmap()'s default in_axes=0 and out_axes=0 does the job.
# (You can also re-implement mmp() this way, but it involves transposing).
##-snip
mvp_ = lambda m, v: jax.vmap(lambda x: vdp(x, v))(m)
print(mvp(m1, m2[:, 0]) - mvp_(m1, m2[:, 0]))
mmp_ = lambda m1, m2: jax.vmap(lambda x: mvp_(m1, x))(m2.T).T
mmp_(m1, m2) - m1 @ m2
# JAX would not have the final X in it's name if it were not for XLA, the
# magic sauce that somehow takes computation defined in a function as input
# and produces a much faster version of it.
# @jax.jit
def f(x):
y = x
for _ in range(10):
y = y - 0.1 * y + 3.0
return y[:100, :100]
x = jax.random.normal(jax.random.PRNGKey(0), (5000, 5000))
%timeit f(x).block_until_ready()
# YOUR ACTION REQUIRED:
# Move your magic JAX wand and cast a spell by removing a single character from
# above example, drastically speeding up the computation!
# Note: JIT unrolls the for loop and converts all computations to XLA
# primitives. XLA is then smart enough to fuse kernels for multiplication and
# addition, and optimize the program to only compute those parts that are
# actually needed for the function result...
# Just to be clear : `@jit` is Python's decorator syntax [1], you can also use
# jit() like the other function transformations.
# [1] https://www.python.org/dev/peps/pep-0318
@jax.jit
def f1_jit(x):
return x**0.5
def f2(x):
return x**0.5
# It's really the same.
f2_jit = jax.jit(f2)
f1_jit(2) - f2_jit(2)
# What you need to understand about JIT (1/3): When a function is traced.
@jax.jit
def noop(x):
# This statement only gets executed when the function is traced, i.e. every
# time you execute the JIT-ted version with a new ShapedArray (different dtype
# and/or different shape).
print("Tracing noop:", x)
return x
noop(jnp.arange(3)) # Tracing.
noop(jnp.arange(3) + 1) # Using trace from cache.
noop(jnp.arange(4)) # Tracing.
noop(jnp.arange(4.0)) # Tracing.
noop(jnp.arange(1.0, 5.0)) # Using trace from cache.
# What you need to understand about JIT (2/3): Baking in environment.
magic_number = 13
@jax.jit
def add_magic(x):
return x + magic_number
print(add_magic(np.array([0])))
magic_number = 42
print(add_magic(np.array([0])))
print(add_magic(np.array([0.0])))
# What you need to understand about JIT (2/3): Value-dependent flow.
def mult(x, n):
print("Tracing mult:", x, n)
tot = 0
while n > 0:
tot += x
n -= 1
return tot
# The problem:
# The following statement fails, because : JIT will generate the function's XLA
# code by tracing it with `ShapedArray`'s. These arrays have only their shape
# and datatype defined. Hence, if there are any statements involving the actual
# *values* of the parameters, JIT does not know what to do and raises an
# exception.
# (Note that if mult were traced with `ConcreteArray`s then the trace would work
# just fine; you can see that when executing `grad(mult)(3., 2.)`)
try:
jax.jit(mult)(3, 2)
except Exception as e:
print(f"\n### FAILED WITH : {e}")
# How can we fix this ??
# Solution 1 : static_argnums
jax.jit(mult, static_argnums=1)(3, 4)
jax.jit(mult, static_argnums=1)(3, 5)
jax.jit(mult, static_argnums=1)(3, 6)
# By the way : did you notice how the function is traced exactly three times the
# first time this cell is executed, but not when you re-execute the same cell?
# That's because JIT-ted functions are cached. If You want to observe the
# tracing a second time, you first need to execute above cell so that `mult`
# gets redefined and the cache needs to be updated with the new definition.
# Solution 2 : (un)currying
# YOUR ACTION REQUIRED:
# Use jit() without `static_argnums=`, but (un)curry the function mult instead.
##-snip
mult_jit = lambda x, n: jax.jit(lambda x: mult(x, n))(x)
mult_jit(3, 4)
mult_jit(3, 5)
mult_jit(3, 6)
# Solution 3 : Use XLA primitives for control flow.
# Remember: You can inspect `jax.lax.while_loop()` docs by either:
# - Go to https://jax.readthedocs.io
# - Execute a cell containing `?jax.lax.while_loop`
# - Hover your mouse over `while_loop` and wait two seconds
def mult_(x, n):
print("Tracing mult_:", x, n)
def cond_fun(n_tot):
n, tot = n_tot
return n > 0
def body_fun(n_tot):
n, tot = n_tot
return (n - 1, tot + x)
return jax.lax.while_loop(cond_fun, body_fun, (n, 0))
jax.jit(mult_)(3, 4)
jax.jit(mult_)(3, 5)
jax.jit(mult_)(3, 6)
# Woah! Wasn't JAX supposed to be fast !? What is going on here ??
# Also note that increasing the second number significantly will crash
# your runtime...
%%time
jax.jit(mult, static_argnums=1)(3, 5000)
##-snip
# JAX will unroll the loop before compiling it. Compiling a huge graph that has
# thousands of unrolled additions will take a long time, and at some point
# exhaust all memory, leading to a OOM crash.
# Does this function have the same problems? Why not?
%%time
jax.jit(mult_)(3, 5000)
##-snip
# No unrolling is happening here. We simply have a single XLA instruction that
# is compiled as any other instruction.
# Parallel computing is more fun with multiple devices :-)
# Go back to "Initialization" and connect to a different runtime if you're
# running on a single device.
assert jax.device_count() == 8, "Please connect to a JellyDonut runtime!"
# By default in_axes=0, so pmap() will split every incoming tensor across its
# first axis - which should be sized jax.local_device_count().
# The computations are then performed in parallel and the results are returned
# as a sharded device array. The data remains on the individual accelerators.
# Note that pmap() also XLA-compiles the function, so no need to call jit().
# Generate 8 different random seeds.
keys = jax.random.split(jax.random.PRNGKey(0), 8)
# Generate 8 different random matrices. Data remains on devices.
mats = jax.pmap(lambda key: jax.random.normal(key, (8_000, 8_000)))(keys)
# Perform 8 matmuls in parallel.
results = jax.pmap(lambda m1, m2: m1 @ m2)(mats, mats)
# YOUR ACTION REQUIRED:
# Fetch the mean of thes matrices from every device and print it out here.
##-snip
jax.pmap(jnp.mean)(results)
import functools
# Here we use jax.lax.psum() to do computations across devices. Note that these
# operations can cause a lot of communication costs. Below we split our 8
# devices along two axis (4x2).
# Note in particular that parallel operators work across hosts! We can't
# demonstrate this in a Colab, but you will encounter it later in the Flax
# examples and brain templates.
# You can read more about parallel operators here:
# https://jax.readthedocs.io/en/latest/jax.lax.html#parallel-operators
# axis 0 : rows
@functools.partial(jax.pmap, axis_name="rows")
# axis 1 : columns
@functools.partial(jax.pmap, axis_name="cols")
def f(x):
# across the rows (= column sum)
row_sum = jax.lax.psum(x, "rows")
# across the cols (= row sum)
col_sum = jax.lax.psum(x, "cols")
total_sum = jax.lax.psum(x, ("rows", "cols"))
return row_sum, col_sum, total_sum
# YOUR ACTION REQUIRED:
# Create an array, feed it to f() and verify the correctness of the results
##-snip
f(jnp.arange(8.0).reshape(4, 2))
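# Worked by hand for x = [[0, 1], [2, 3], [4, 5], [6, 7]]:
#   psum over "rows" gives the column sums [12, 16] on every device,
#   psum over "cols" gives each row's sum (1, 5, 9, 13),
#   psum over both axes gives the grand total 28 everywhere.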
# Whenever we encounter a function argument, e.g. the `params` for a model, or
# the first argument to `grad()` to whose respect we perform automatic
# differentiation, it can really be a "pytree" of `jnp.ndarray`. A pytree
# consists of an arbitrary combination of Python dict/list/tuple and allows us
# to structure our data hierarchically.
# This is a pytree:
data = dict(
array_3x2=jnp.arange(6.0).reshape((3, 2)),
mixed_tuple=(0.1, 0.2, 0.3, [1.0, 2.0, 3.0]),
subdict=dict(
array_3x4=jnp.arange(12.0).reshape((3, 4)),
array_4x3=jnp.arange(12.0).reshape((4, 3)),
),
)
# Call a function over all values, output resulting tree:
jax.tree_map(jnp.shape, data)
# Define a function that does some computation with the values:
def sumsquares(x):
value_flat, value_tree = jax.tree_flatten(x)
del value_tree # not needed.
tot = 0
for value in value_flat:
if isinstance(value, jnp.ndarray):
value = value.sum()
tot += value**2
return tot
sumsquares(data)
# Compute gradients. Remember that grad() computes gradients wrt the first
# argument, but that first argument can be an arbitrarily complex pytree (like
# all the weights in your hierarchical model).
grads = jax.grad(sumsquares)(data)
grads
# YOUR ACTION REQUIRED:
# Take a step against the gradients using `jax.tree_multimap()`
##-snip
data2 = jax.tree_multimap(lambda value, grad: value - 0.1 * grad, data, grads)
data2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
Step2: Now we'll create empty lc, rv, orb, and mesh datasets. We'll then look to see how the systemic velocity (vgamma) affects the observables in each of these datasets, and how those are also affected by light-time effects (ltte).
Step3: Changing Systemic Velocity and LTTE
Step4: We'll leave it set at 0.0 for now, and then change vgamma to see how that affects the observables.
Step5: The option to enable or disable LTTE is in the compute options; we can either set ltte or just temporarily pass a value when we call run_compute.
Step6: Let's first compute the model with 0 systemic velocity and ltte=False (not that it would matter in this case). Let's also name the model so we can keep track of what settings were used.
Step7: For our second model, we'll set a somewhat ridiculous value for the systemic velocity (so that the effects are exaggerated and clearly visible over one orbit), but leave ltte off.
Step8: Lastly, let's leave this value of vgamma, but enable light-time effects.
Step9: Influence on Light Curves (fluxes)
Step10: However, once ltte is enabled, the time between two eclipses (i.e. the observed period of the system) changes. This occurs because the path between the system and observer has changed. This is an important effect to note - the period parameter sets the TRUE period of the system, not necessarily the observed period between two successive eclipses.
Step11: Influence on Radial Velocities
Step12: Light-time will have a similar effect on RVs as it does on LCs - it simply changes the observed period.
Step13: Influence on Orbits (positions, velocities)
Step14: Plotting the z-velocities with respect to time would show the same as the RVs, except without any Rossiter-McLaughlin like effects. Note however the flip in z-convention between vz and radial velocities (+z is defined as towards the observer to make a right-handed system, but by convention +rv is a red shift).
Step15: Now let's look at the effect that enabling ltte has on these same plots.
Step16: Influence on Meshes
Step17: As you can see, since the center of mass of the system was at 0,0,0 at t0 - including systemic velocity actually shows the system spiraling towards or away from the observer (who is in the positive z direction). In other words - the positions of the meshes are affected in the same way as the orbits (note the offset on the ylimit scales).
|
<ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.0,<2.1"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
times1 = np.linspace(0,1,201)
times2 = np.linspace(90,91,201)
b.add_dataset('lc', times=times1, dataset='lc1')
b.add_dataset('lc', times=times2, dataset='lc2')
b.add_dataset('rv', times=times1, dataset='rv1')
b.add_dataset('rv', times=times2, dataset='rv2')
b.add_dataset('orb', times=times1, dataset='orb1')
b.add_dataset('orb', times=times2, dataset='orb2')
b.add_dataset('mesh', times=[0], dataset='mesh1')
b.add_dataset('mesh', times=[900], dataset='mesh2')
b['vgamma@system']
b['t0@system']
b['ltte@compute']
b.run_compute(irrad_method='none', model='0_false')
b['vgamma@system'] = 100
b.run_compute(irrad_method='none', model='100_false')
b.run_compute(irrad_method='none', ltte=True, model='100_true')
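# Each figure below compares the models side by side: the left panel shows the dataset sampled
# over times1 (the window starting at t0), the right panel the same dataset over times2
# (the window starting at time 90).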
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['lc1@0_false'].plot(color='b', ax=ax1)
axs, artists = b['lc1@100_false'].plot(color='r', ax=ax1)
axs, artists = b['lc2@0_false'].plot(color='b', ax=ax2)
axs, artists = b['lc2@100_false'].plot(color='r', ax=ax2)
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['lc1@100_false'].plot(color='r', ax=ax1)
axs, artists = b['lc1@100_true'].plot(color='g', ax=ax1)
axs, artists = b['lc2@100_false'].plot(color='r', ax=ax2)
axs, artists = b['lc2@100_true'].plot(color='g', ax=ax2)
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['rv1@0_false'].plot(color='b', ax=ax1)
axs, artists = b['rv1@100_false'].plot(color='r', ax=ax1)
axs, artists = b['rv2@0_false'].plot(color='b', ax=ax2)
axs, artists = b['rv2@100_false'].plot(color='r', ax=ax2)
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['rv1@100_false'].plot(color='r', ax=ax1)
axs, artists = b['rv1@100_true'].plot(color='g', ax=ax1)
axs, artists = b['rv2@100_false'].plot(color='r', ax=ax2)
axs, artists = b['rv2@100_true'].plot(color='g', ax=ax2)
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['orb1@0_false'].plot(x='xs', y='zs', color='b', ax=ax1)
axs, artists = b['orb1@100_false'].plot(x='xs', y='zs', color='r', ax=ax1)
axs, artists = b['orb2@0_false'].plot(x='xs', y='zs', color='b', ax=ax2)
axs, artists = b['orb2@100_false'].plot(x='xs', y='zs', color='r', ax=ax2)
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['orb1@0_false'].plot(x='times', y='vzs', color='b', ax=ax1)
axs, artists = b['orb1@100_false'].plot(x='times', y='vzs', color='r', ax=ax1)
axs, artists = b['orb2@0_false'].plot(x='times', y='vzs', color='b', ax=ax2)
axs, artists = b['orb2@100_false'].plot(x='times', y='vzs', color='r', ax=ax2)
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['orb1@100_false'].plot(x='xs', y='zs', color='r', ax=ax1)
axs, artists = b['orb1@100_true'].plot(x='xs', y='zs', color='g', ax=ax1)
axs, artists = b['orb2@100_false'].plot(x='xs', y='zs', color='r', ax=ax2)
axs, artists = b['orb2@100_true'].plot(x='xs', y='zs', color='g', ax=ax2)
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['orb1@100_false'].plot(x='times', y='vzs', color='r', ax=ax1)
axs, artists = b['orb1@100_true'].plot(x='times', y='vzs', color='g', ax=ax1)
axs, artists = b['orb2@100_false'].plot(x='times', y='vzs', color='r', ax=ax2)
axs, artists = b['orb2@100_true'].plot(x='times', y='vzs', color='g', ax=ax2)
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['mesh1@0_false'].plot(time=0.0, x='xs', y='zs', ax=ax1)
axs, artists = b['mesh1@100_false'].plot(time=0.0, x='xs', y='zs', ax=ax1)
ax1.set_xlim(-10,10)
ax1.set_ylim(-10,10)
axs, artists = b['mesh2@0_false'].plot(time=900.0, x='xs', y='zs', ax=ax2)
axs, artists = b['mesh2@100_false'].plot(time=900.0, x='xs', y='zs', ax=ax2)
ax2.set_xlim(-10,10)
ax2.set_ylim(-10,10)
fig = plt.figure(figsize=(10,6))
ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
axs, artists = b['mesh1@100_false'].plot(time=0.0, x='xs', y='zs', ax=ax1)
axs, artists = b['mesh1@100_true'].plot(time=0.0, x='xs', y='zs', ax=ax1)
ax1.set_xlim(-10,10)
ax1.set_ylim(-10,10)
axs, artists = b['mesh2@100_false'].plot(time=900.0, x='xs', y='zs', ax=ax2)
axs, artists = b['mesh2@100_true'].plot(time=900.0, x='xs', y='zs', ax=ax2)
ax2.set_xlim(-10,10)
ax2.set_ylim(11170,11200)
b['primary@mesh1@0_false'].get_value('vzs', time=0.0)[:5]
b['primary@mesh1@100_false'].get_value('vzs', time=0.0)[:5]
b['primary@mesh1@100_true'].get_value('vzs', time=0.0)[:5]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the block below we're loading the mean file (if it exists) and the image and then pre-processing the image for ingestion into a Caffe2 convolutional neural network!
Step2: Now that the image is ready to be ingested by the CNN, let's open the protobufs, load them into the workspace, and run the net.
Step3: See that we have 1000 results there in the middle? If we had submitted more than one image in our batch then the array would be larger, but still have 1000 units there in the middle. It is holding the probability for each category in the pre-trained model. So when you look at the results, it's like saying, "Computer, what's the probability that this is a Beryllium sphere?" Or gila monster, or any of the other 998 groups of things in there.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from caffe2.proto import caffe2_pb2
import numpy as np
import skimage.io
import skimage.transform
from matplotlib import pyplot
import os
from caffe2.python import core, workspace, models
import urllib2
print("Required modules imported.")
# Configuration --- Change to your setup and preferences!
CAFFE_MODELS = "/usr/local/caffe2/python/models"
# sample images you can try, or use any URL to a regular image.
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/Whole-Lemon.jpg/1235px-Whole-Lemon.jpg"
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/7/7b/Orange-Whole-%26-Split.jpg"
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/a/ac/Pretzel.jpg"
# IMAGE_LOCATION = "https://cdn.pixabay.com/photo/2015/02/10/21/28/flower-631765_1280.jpg"
# IMAGE_LOCATION = "images/cat.jpg"
# IMAGE_LOCATION = "images/cowboy-hat.jpg"
# IMAGE_LOCATION = "images/cell-tower.jpg"
# IMAGE_LOCATION = "images/Ducreux.jpg"
# IMAGE_LOCATION = "images/pretzel.jpg"
# IMAGE_LOCATION = "images/orangutan.jpg"
# IMAGE_LOCATION = "images/aircraft-carrier.jpg"
IMAGE_LOCATION = "images/flower.jpg"
# What model are we using? You should have already converted or downloaded one.
# format below is the model's:
# folder, INIT_NET, predict_net, mean, input image size
# you can switch squeezenet out with 'bvlc_alexnet', 'bvlc_googlenet' or others that you have downloaded
# if you have a mean file, place it in the same dir as the model
MODEL = 'squeezenet', 'init_net.pb', 'predict_net.pb', 'ilsvrc_2012_mean.npy', 227
# codes - these help decypher the output and source from a list from AlexNet's object codes to provide an result like "tabby cat" or "lemon" depending on what's in the picture you submit to the neural network.
# The list of output codes for the AlexNet models (squeezenet)
codes = "https://gist.githubusercontent.com/aaronmarkham/cd3a6b6ac071eca6f7b4a6e40e6038aa/raw/9edb4038a37da6b5a44c3b5bc52e448ff09bfe5b/alexnet_codes"
print "Config set!"
def crop_center(img,cropx,cropy):
y,x,c = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy,startx:startx+cropx]
def rescale(img, input_height, input_width):
print("Original image shape:" + str(img.shape) + " and remember it should be in H, W, C!")
print("Model's input shape is %dx%d") % (input_height, input_width)
aspect = img.shape[1]/float(img.shape[0])
print("Orginal aspect ratio: " + str(aspect))
if(aspect>1):
# landscape orientation - wide image
res = int(aspect * input_height)
imgScaled = skimage.transform.resize(img, (input_width, res))
if(aspect<1):
# portrait orientation - tall image
res = int(input_width/aspect)
imgScaled = skimage.transform.resize(img, (res, input_height))
if(aspect == 1):
imgScaled = skimage.transform.resize(img, (input_width, input_height))
pyplot.figure()
pyplot.imshow(imgScaled)
pyplot.axis('on')
pyplot.title('Rescaled image')
print("New image shape:" + str(imgScaled.shape) + " in HWC")
return imgScaled
print "Functions set."
# set paths and variables from model choice and prep image
CAFFE_MODELS = os.path.expanduser(CAFFE_MODELS)
# mean can be 128 or custom based on the model
# gives better results to remove the colors found in all of the training images
MEAN_FILE = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[3])
if not os.path.exists(MEAN_FILE):
mean = 128
else:
mean = np.load(MEAN_FILE).mean(1).mean(1)
mean = mean[:, np.newaxis, np.newaxis]
print "mean was set to: ", mean
# some models were trained with different image sizes, this helps you calibrate your image
INPUT_IMAGE_SIZE = MODEL[4]
# make sure all of the files are around...
#if not os.path.exists(CAFFE2_ROOT):
# print("Houston, you may have a problem.")
INIT_NET = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[1])
print 'INIT_NET = ', INIT_NET
PREDICT_NET = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[2])
print 'PREDICT_NET = ', PREDICT_NET
if not os.path.exists(INIT_NET):
print(INIT_NET + " not found!")
else:
print "Found ", INIT_NET, "...Now looking for", PREDICT_NET
if not os.path.exists(PREDICT_NET):
print "Caffe model file, " + PREDICT_NET + " was not found!"
else:
print "All needed files found! Loading the model in the next block."
# load and transform image
img = skimage.img_as_float(skimage.io.imread(IMAGE_LOCATION)).astype(np.float32)
img = rescale(img, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE)
img = crop_center(img, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE)
print "After crop: " , img.shape
pyplot.figure()
pyplot.imshow(img)
pyplot.axis('on')
pyplot.title('Cropped')
# switch to CHW
img = img.swapaxes(1, 2).swapaxes(0, 1)
pyplot.figure()
for i in range(3):
# For some reason, pyplot subplot follows Matlab's indexing
# convention (starting with 1). Well, we'll just follow it...
pyplot.subplot(1, 3, i+1)
pyplot.imshow(img[i])
pyplot.axis('off')
pyplot.title('RGB channel %d' % (i+1))
# switch to BGR
img = img[(2, 1, 0), :, :]
# remove mean for better results
img = img * 255 - mean
# add batch size
img = img[np.newaxis, :, :, :].astype(np.float32)
print "NCHW: ", img.shape
# initialize the neural net
with open(INIT_NET) as f:
init_net = f.read()
with open(PREDICT_NET) as f:
predict_net = f.read()
p = workspace.Predictor(init_net, predict_net)
# run the net and return prediction
results = p.run([img])
# turn it into something we can play with and examine which is in a multi-dimensional array
results = np.asarray(results)
print "results shape: ", results.shape
# the rest of this is digging through the results
results = np.delete(results, 1)
index = 0
highest = 0
arr = np.empty((0,2), dtype=object)
arr[:,0] = int(10)
arr[:,1:] = float(10)
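# note: the two assignments above write into a zero-row array, so they have no effect;
# the (index, probability) rows are actually collected by np.append() inside the loop below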
for i, r in enumerate(results):
# imagenet index begins with 1!
i=i+1
arr = np.append(arr, np.array([[i,r]]), axis=0)
if (r > highest):
highest = r
index = i
# top 3 results
print "Raw top 3 results:", sorted(arr, key=lambda x: x[1], reverse=True)[:3]
# now we can grab the code list
response = urllib2.urlopen(codes)
# and lookup our result from the list
for line in response:
code, result = line.partition(":")[::2]
if (code.strip() == str(index)):
print MODEL[0], "infers that the image contains ", result.strip()[1:-2], "with a ", highest*100, "% probability"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Consider the following data. It uses a simplified tagging scheme for chunking, which only distinguishes between "part of an NP" (1) and "not part of an NP" (0).
Step2: For each of the chunkers, first compute accuracy, precision, recall and F1-score by hand, and then check your result.
Step3: Exercise 2 Downloading resources
Step4: Once you have downloaded it successfully, you can access it as follows
Step5: The chunk_types argument is used to select chunk types (in this example
Step6: Then evaluate your parser on the CoNLL 2000 corpus
Step7: Exercise 4 Data-driven chunking
Step8: Train and evaluate the UnigramChunker on the CoNLL 2000 corpus.
Step9: To get an overview of what the chunker has learned, we can have it make a prediction for every possible POS tag
Step10: (b) The ConsecutiveNPChunker, whose code you can see in the next cell, is based on a classifier.
Step11: This allows us to specify more precisely which features are extracted for the classification.
Step12: This feature extractor extracts exactly one feature, namely the POS tag that
Step13: (c) Add further features for
Step14: Homework
Step15: Adding features step by step
Step16: Ablation Study
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.metrics import accuracy_score, precision_score,\
recall_score, f1_score
ground_truth = [1,0,1,0,0,1,1,1,1,0]
chunker1 = [1,1,1,0,1,0,1,1,1,1]
chunker2 = [1,0,1,0,0,0,0,0,1,0]
chunker3 = [0,0,0,0,0,1,1,1,1,0]
def evaluate(chunker):
print(
"Accuracy:",
"{:.2f}".format(accuracy_score(ground_truth, chunker))
)
print(
"Precision:",
"{:.2f}".format(precision_score(ground_truth, chunker))
)
print(
"Recall:",
"{:.2f}".format(recall_score(ground_truth, chunker))
)
print(
"F1-Score:",
"{:.2f}".format(f1_score(ground_truth, chunker))
)
evaluate(chunker1)
evaluate(chunker2)
evaluate(chunker3)
import nltk
nltk.download()
from nltk.corpus import conll2000
conll2000.chunked_sents('train.txt', chunk_types=['NP'])[99]
regex = r""
test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP'])
cp = nltk.RegexpParser(regex)
print(cp.evaluate(test_sents))
class UnigramChunker(nltk.ChunkParserI):
def __init__(self, train_sents):
train_data = [
[
(t,c)
for w,t,c in nltk.chunk.tree2conlltags(sent)
]
for sent in train_sents
]
self.tagger = nltk.UnigramTagger(train_data)
def parse(self, sentence):
pos_tags = [pos for (word,pos) in sentence]
tagged_pos_tags = self.tagger.tag(pos_tags)
chunktags = [
chunktag for (pos, chunktag) in tagged_pos_tags
]
conlltags = [
(word, pos, chunktag)
for ((word, pos), chunktag)
in zip(sentence, chunktags)
]
return nltk.chunk.conlltags2tree(conlltags)
train_sents = conll2000.chunked_sents('train.txt', chunk_types=['NP'])
# uc = ...
postags = sorted(set(pos for sent in train_sents for (word,pos) in sent.leaves()))
# uc.tagger.tag(postags)
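# One possible way to fill in the blanks above (a sketch, not the only solution):
# uc = UnigramChunker(train_sents)
# print(uc.evaluate(test_sents))
# uc.tagger.tag(postags)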
class ConsecutiveNPChunkTagger(nltk.TaggerI):
def __init__(self, train_sents, npchunk_features):
self.extract_features = npchunk_features
train_set = []
for tagged_sent in train_sents:
untagged_sent = nltk.tag.untag(tagged_sent)
history = []
for i, (word, tag) in enumerate(tagged_sent):
featureset = npchunk_features(untagged_sent, i, history)
train_set.append( (featureset, tag) )
history.append(tag)
self.classifier = nltk.NaiveBayesClassifier.train(train_set)
def tag(self, sentence):
history = []
for i, word in enumerate(sentence):
featureset = self.extract_features(sentence, i, history)
tag = self.classifier.classify(featureset)
history.append(tag)
return zip(sentence, history)
class ConsecutiveNPChunker(nltk.ChunkParserI):
def __init__(self, train_sents, npchunk_features):
tagged_sents = [[((w,t),c) for (w,t,c) in
nltk.chunk.tree2conlltags(sent)]
for sent in train_sents]
self.tagger = ConsecutiveNPChunkTagger(tagged_sents, npchunk_features)
def parse(self, sentence):
tagged_sents = self.tagger.tag(sentence)
conlltags = [(w,t,c) for ((w,t),c) in tagged_sents]
return nltk.chunk.conlltags2tree(conlltags)
def pos_feature(sentence , i, history):
word, pos = sentence[i]
return {"pos": pos}
chunker = ConsecutiveNPChunker(train_sents, pos_feature)
# TODO: code for evaluation
def word_feature(sentence, i, history):
word, pos = sentence[i]
return {"pos": pos}
def previous_pos(sentence, i, history):
word, pos = sentence[i]
return {"pos": pos}
def previous_chunk(sentence, i, history):
word, pos = sentence[i]
return {"pos": pos}
chunker = ConsecutiveNPChunker(train_sents, word_feature)
print(chunker.evaluate(test_sents))
chunker = ConsecutiveNPChunker(train_sents, previous_pos)
print(chunker.evaluate(test_sents))
chunker = ConsecutiveNPChunker(train_sents, previous_chunk)
print(chunker.evaluate(test_sents))
def next_pos(sentence, i, history):
word, pos = sentence[i]
return {
"pos": pos
}
def prevcur_pos(sentence, i, history):
word, pos = sentence[i]
return {
"pos": pos
}
def curnext_pos(sentence, i, history):
word, pos = sentence[i]
return {
"pos": pos
}
def next_word(sentence, i, history):
word, pos = sentence[i]
return {
"pos": pos
}
def tags_since_dt(sentence, i, history):
def tags_since_dt_helper(sentence, i):
tags = set()
return '+'.join(sorted(tags))
word, pos = sentence[i]
return {
"pos": pos,
"tags-since-dt": tags_since_dt_helper(sentence, i)
}
chunker = ConsecutiveNPChunker(train_sents, next_pos)
print(chunker.evaluate(test_sents))
chunker = ConsecutiveNPChunker(train_sents, prevcur_pos)
print(chunker.evaluate(test_sents))
chunker = ConsecutiveNPChunker(train_sents, curnext_pos)
print(chunker.evaluate(test_sents))
chunker = ConsecutiveNPChunker(train_sents, next_word)
print(chunker.evaluate(test_sents))
chunker = ConsecutiveNPChunker(train_sents, tags_since_dt)
print(chunker.evaluate(test_sents))
def ablate(feat_extr, feat_name):
def ablated_feat_extr(sentence, i, history):
feat_dict = feat_extr(sentence, i, history)
feat_dict.pop(feat_name, None)
return feat_dict
return ablated_feat_extr
# enter in this list the names of the features that were assigned above
for feat_name in []:
print("Ablated Feature:", feat_name)
feature_extractor = ablate(tags_since_dt, feat_name)
chunker = ConsecutiveNPChunker(train_sents, feature_extractor)
print(chunker.evaluate(test_sents))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting biased and unbiased CVs
Step2: Plotting contour plot of biased FES
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
unbiasedCVs = np.genfromtxt('NVT_monitor/COLVAR',comments='#');
biasedCVs = np.genfromtxt('MetaD/COLVAR',comments='#');
unbiasedCVsHOT = np.genfromtxt('NVT_monitor/hot/COLVAR',comments='#');
%matplotlib inline
fig = plt.figure(figsize=(6,6))
axes = fig.add_subplot(111)
stride=5
xlabel='$\Phi$'
ylabel='$\Psi$'
axes.plot(biasedCVs[::stride,1],biasedCVs[::stride,2],marker='o',markersize=4,linestyle='none')
axes.plot(unbiasedCVs[::stride,1],unbiasedCVs[::stride,2],marker='o',markersize=4,linestyle='none',markerfacecolor='yellow')
axes.set_xlabel(xlabel, fontsize=20)
axes.set_ylabel(ylabel, fontsize=20)
plt.show()
#read the data in from a text file
fesdata = np.genfromtxt('MetaD/fes.dat',comments='#');
fesdata = fesdata[:,0:3]
#what was your grid size? this calculates it
dim=int(np.sqrt(np.size(fesdata)/3))
#some post-processing to be compatible with contourf
X=np.reshape(fesdata[:,0],[dim,dim],order="F") #order F was 20% faster than A/C
Y=np.reshape(fesdata[:,1],[dim,dim],order="F")
Z=np.reshape((fesdata[:,2]-np.min(fesdata[:,2]))/4.184,[dim,dim],order="F") #convert to kcal/mol
#what spacing do you want? assume units are in kJ/mol
spacer=1
lines=20
levels=np.linspace(0,lines*spacer,num=(lines+1),endpoint=True)
fig=plt.figure(figsize=(10,8))
axes = fig.add_subplot(111)
plt.contourf(X, Y, Z, levels, cmap=plt.cm.bone,)
plt.colorbar()
plt.xlabel('$\Phi$')
plt.ylabel('$\Psi$')
axes.set_xlabel(xlabel, fontsize=20)
axes.set_ylabel(ylabel, fontsize=20)
stride=10
#axes.plot(biasedCVs[::stride,1],biasedCVs[::stride,2],marker='o',markersize=8,linestyle='none',markerfacecolor='cyan')
axes.plot(unbiasedCVs[::stride,1],unbiasedCVs[::stride,2],marker='o',markersize=8,linestyle='none',markerfacecolor='blue')
#axes.plot(unbiasedCVsHOT[::stride,1],unbiasedCVsHOT[::stride,2],marker='o',markersize=8,linestyle='none',markerfacecolor='red')
unbiasedCVs = np.genfromtxt('NVT_monitor/other_basin/COLVAR',comments='#');
stride=5
axes.plot(unbiasedCVs[::stride,1],unbiasedCVs[::stride,2],marker='o',markersize=8,linestyle='none',markerfacecolor='yellow')
plt.savefig('fes_bias.png')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How Many Photons Came From the Cluster?
Step2: Estimating the background
Step3: First, let's visualize the background region by masking out everything else.
Step4: Now let's look at the mean and median of the pixels in the background annulus that have non-negative values.
Step5: Q
Step6: Exercise
Step7: Now we can make our estimates
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import astropy.io.fits as pyfits
import numpy as np
import astropy.visualization as viz
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 10.0)
targdir = 'a1835_xmm/'
imagefile = targdir+'P0098010101M2U009IMAGE_3000.FTZ'
expmapfile = targdir+'P0098010101M2U009EXPMAP3000.FTZ'
bkgmapfile = targdir+'P0098010101M2X000BKGMAP3000.FTZ'
!du -sch $targdir/*
imfits = pyfits.open(imagefile)
im = imfits[0].data
plt.imshow(viz.scale_image(im, scale='log', max_cut=40), cmap='gray', origin='lower');
# First make some coordinate arrays, including polar r from the cluster center:
ny, nx = im.shape
centroid = np.unravel_index(im.argmax(), im.shape)
x = np.linspace(0, nx-1, nx)
y = np.linspace(0, ny-1, ny)
dx, dy = np.meshgrid(x,y)
dx = dx - centroid[1]
dy = dy - centroid[0]
r = np.sqrt(dx*dx + dy*dy)
# Now select an outer annulus, for the background,
# and an inner circle, for the cluster:
background = (r >= 100.0) & (r <= 150.0)
signal = (r < 100.0)
maskedimage = im.copy()
maskedimage[np.logical_not(background)] = -1
plt.imshow(viz.scale_image(maskedimage, scale='log', max_cut=40), cmap='gray', origin='lower')
meanbackground = np.mean(im[background])
medianbackground = np.median(im[background])
print("Mean background counts per pixel = ",meanbackground)
print("Median background counts per pixel = ",medianbackground)
plt.figure(figsize=(10,7))
n, bins, patches = plt.hist(im[background], bins=np.linspace(-3.5,29.5,34))
# plt.yscale('log', nonposy='clip')
plt.xlabel('Background annulus pixel value (counts)')
plt.ylabel('Frequency')
plt.axis([-3.0, 30.0, 0, 40000])
plt.grid(True)
plt.show()
stdevbackground = np.std(im[background])
print("Standard deviation: ",stdevbackground)
maskedimage = im.copy()
maskedimage[np.logical_not(signal)] = 0
plt.imshow(viz.scale_image(maskedimage, scale='log', max_cut=40), cmap='gray', origin='lower')
plt.figure(figsize=(10,7))
n, bins, patches = plt.hist(im[signal], bins=np.linspace(-3.5,29.5,34), color='red')
plt.yscale('log', nonposy='clip')
plt.xlabel('Signal region pixel value (counts)')
plt.ylabel('Frequency')
plt.axis([-3.0, 30.0, 0, 500000])
plt.grid(True)
plt.show()
# Total counts in signal region:
Ntotal = np.sum(im[signal])
# Background counts: the mean counts per pixel in the annulus,
# multiplied by the number of pixels in the signal region:
Nbackground = np.count_nonzero(signal)*meanbackground # Is this a good choice?
# Difference is the cluster counts:
Ncluster = Ntotal - Nbackground
print("Counts in signal region: ",Ntotal)
print("Approximate counts due to background: ",Nbackground)
print("Approximate counts due to cluster: ",Ncluster)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q1
Step3: Q5
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
racine = NoeudTri("un") # noeud tri n'est pas encore défini
racine.insere ("unite")
racine.insere ("deux")
print(racine)
from pyensae.graphhelper import draw_diagram
img = draw_diagram("""
blockdiag {
    A -> B -> C -> D;
    A -> E -> F;
    F -> G [label = "edge-FG"];
    E [label="label-E"]
}
""")
img
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We need a primitive that applies the force to Poppy's chest.
Step2: To start and stop the force primitive
Step3: Prepared poppy for experiments
Step4: Track the head like an IMU could do. On the next graph, you can see the pushing force every two seconds on the chest.
Step5: Changing the reference frame from the absolute frame to a frame relative to Poppy (if you want to know more about rotation matrices, see this tutorial). We choose the frame oriented by the chest visual. The graph shows the same movement, but in the relative frame, and the arms try to decrease the oscillation by balancing the movement in the opposite direction.
Step6: The balancing uses only the speed of the movement (i.e. the movement of the arm is simply proportional to the speed of the head's movement). It could be better to also use the acceleration and the position (integration of the speed). So, to be continued...
Step7: What can I say, in conclusion
|
<ASSISTANT_TASK:>
Python Code:
from poppy.creatures import PoppyHumanoid
poppy = PoppyHumanoid(simulator='vrep')
%pylab inline
# the class time is used to set the time object to be the simulated time in V-REP and not the default python time
import time as real_time
class time:
def __init__(self,robot):
self.robot=robot
def time(self):
t_simu = self.robot.current_simulation_time
return t_simu
def sleep(self,t):
t0 = self.robot.current_simulation_time
while (self.robot.current_simulation_time - t0) < t-0.01:
real_time.sleep(0.001)
time = time(poppy)
print time.time()
time.sleep(0.025) #0.025 is the minimum step according to the V-REP defined dt
print time.time()
from pypot.primitive import Primitive
class force_primitive(Primitive):
def __init__(self, robot, fx, fy, fz, shape):
self.robot = robot
self.fx = fx
self.fy = fy
self.fz = fz
self.shape = shape
Primitive.__init__(self, robot)
def run(self):
while not self.should_stop():
poppy.set_vrep_force([self.fx,self.fy,self.fz],self.shape)
time.sleep(0.025)
poppy.set_vrep_force([0,0,0],self.shape)
time.sleep(2)
force = force_primitive(poppy,-15,0,0,'chest_respondable')
force.start()
force.stop()
poppy.reset_simulation()
poppy.head_y.goto_position(0, 4, wait=False)
poppy.r_shoulder_y.goto_position(0, 4, wait=False)
poppy.l_shoulder_y.goto_position(0, 4, wait=False)
poppy.l_shoulder_x.goto_position(20, 4, wait=False)
poppy.r_shoulder_x.goto_position(-20, 4, wait=True)
list_pos_x = []
list_pos_y = []
list_pos_z = []
t= []
pos = poppy.get_object_position('head_visual')
pos_x=pos[0]
pos_y=pos[1]
pos_z=pos[2]
t0 = time.time()
while time.time() - t0 < 20:
pos = poppy.get_object_position('head_visual')
if pos_x != pos[0]:
decalage_x=pos[0]-pos_x
decalage_y=pos[1]-pos_y
decalage_z=pos[2]-pos_z
list_pos_x.append(decalage_x)
list_pos_y.append(decalage_y)
list_pos_z.append(decalage_z)
pos_x = pos[0]
pos_y = pos[1]
pos_z = pos[2]
t.append(poppy.current_simulation_time)
time.sleep(0.01)
plot(t, list_pos_x)
plot(t, list_pos_y)
plot(t, list_pos_z)
legend(('dx', 'dy','dz'))
def rotate_matrix(a,b,g):
Rx = np.mat([[1,0,0], [0,cos(a),-sin(a)], [0,sin(a),cos(a)]])
Ry = np.mat([[cos(b),0,sin(b)], [0,1,0], [-sin(b),0,cos(b)]])
Rz = np.mat([[cos(g),-sin(g),0], [sin(g),cos(g),0], [0,0,1]])
Rot = Rz*Ry*Rx
return Rot
list_pos_x = []
list_pos_y = []
list_pos_z = []
t= []
pos0 = np.mat(poppy.get_object_position('head_visual')).transpose()
t0 = time.time()
time.sleep(0.01)
while time.time() - t0 < 20:
pos1 = np.mat(poppy.get_object_position('head_visual')).transpose()
if any(pos1 != pos0):
d_pos = pos1-pos0
#make a rotation to d_pos to transpose the movement in a relative frame (frame of chest_visual)
orient_chest = poppy.get_object_orientation('chest_visual')
Rot=rotate_matrix(-orient_chest[0],-orient_chest[1],-orient_chest[2])
d_pos=Rot*d_pos
#balance the movement with an opposite movement of the arm
        #the instantaneous term should be added to a term integrated over the last 10 movements
poppy.r_shoulder_y.goal_position = d_pos[1]*2000
poppy.l_shoulder_y.goal_position = d_pos[1]*2000
poppy.r_shoulder_x.goal_position = -20-d_pos[2]*2000
poppy.l_shoulder_x.goal_position = 20-d_pos[2]*2000
list_pos_x.append(float(d_pos[0]))
list_pos_y.append(float(d_pos[1]))
list_pos_z.append(float(d_pos[2]))
t.append(poppy.current_simulation_time)
pos0 = pos1
time.sleep(0.01)
plot(t, list_pos_x)
plot(t, list_pos_y)
plot(t, list_pos_z)
legend(('dx', 'dy','dz'))
# choose the coefficient you want to apply to speed, position and acceleration. (P S A)
P = 200
S = 2000
A = 2000
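# Minimal pseudo-IMU: it keeps the last `nb_record` position deltas and derives
# speed (latest delta), acceleration (difference of the two latest deltas) and
# position (sum of all stored deltas) from that history.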
class IMU:
def __init__(self,nb_record=10):
self.record_pos=[]
i=0
while i<nb_record:
self.record_pos.insert(0,0)
i+=1
def add_pos(self,d_pos):
size = len(self.record_pos)
self.record_pos.insert(0,d_pos)
del self.record_pos[size]
def speed(self):
return self.record_pos[0]
def acceleration(self):
return self.record_pos[0]-self.record_pos[1]
def position(self):
integrate = 0
for i in self.record_pos:
integrate += i
return integrate
def rotate_matrix(a,b,g):
Rx = np.mat([[1,0,0], [0,cos(a),-sin(a)], [0,sin(a),cos(a)]])
Ry = np.mat([[cos(b),0,sin(b)], [0,1,0], [-sin(b),0,cos(b)]])
Rz = np.mat([[cos(g),-sin(g),0], [sin(g),cos(g),0], [0,0,1]])
Rot = Rz*Ry*Rx
return Rot
list_pos_x = []
list_pos_y = []
list_pos_z = []
list_speed_x = []
list_speed_y = []
list_speed_z = []
list_acceleration_x = []
list_acceleration_y = []
list_acceleration_z = []
t= []
pos0 = np.mat(poppy.get_object_position('head_visual')).transpose()
t0 = time.time()
time.sleep(0.01)
IMU_x = IMU(20)
IMU_y = IMU(20)
IMU_z = IMU(20)
while time.time() - t0 < 15:
pos1 = np.mat(poppy.get_object_position('head_visual')).transpose()
if any(pos1 != pos0):
d_pos = pos1-pos0
#make a rotation to d_pos to transpose the movement in a relative frame (frame of chest_visual)
orient_chest = poppy.get_object_orientation('chest_visual')
Rot=rotate_matrix(-orient_chest[0],-orient_chest[1],-orient_chest[2])
d_pos=Rot*d_pos
# record the speed with the IMU class
IMU_x.add_pos(float(d_pos[0]))
IMU_y.add_pos(float(d_pos[1]))
IMU_z.add_pos(float(d_pos[2]))
#balance the movement with an opposite movement of the arm
poppy.r_shoulder_y.goal_position = IMU_y.speed()*S+IMU_y.position()*P+IMU_y.acceleration()*A
poppy.l_shoulder_y.goal_position = IMU_y.speed()*S+IMU_y.position()*P+IMU_y.acceleration()*A
poppy.r_shoulder_x.goal_position = -20-IMU_z.speed()*S-IMU_z.position()*P-IMU_z.acceleration()*A
poppy.l_shoulder_x.goal_position = 20-IMU_z.speed()*S-IMU_z.position()*P-IMU_z.acceleration()*A
# record for graph
list_pos_x.append(IMU_x.position())
list_pos_y.append(IMU_y.position())
list_pos_z.append(IMU_z.position())
list_speed_x.append(IMU_x.speed())
list_speed_y.append(IMU_y.speed())
list_speed_z.append(IMU_z.speed())
list_acceleration_x.append(IMU_x.acceleration())
list_acceleration_y.append(IMU_y.acceleration())
list_acceleration_z.append(IMU_z.acceleration())
t.append(poppy.current_simulation_time)
pos0 = pos1
time.sleep(0.01)
figure(1)
plot(t, list_pos_x)
plot(t, list_pos_y)
plot(t, list_pos_z)
legend(('dx', 'dy','dz'))
xlabel('time seconds')
title ('IMU position')
figure(2)
plot(t, list_speed_x)
plot(t, list_speed_y)
plot(t, list_speed_z)
legend(('dx', 'dy','dz'))
xlabel('time seconds')
title ('IMU speed')
figure(3)
plot(t, list_acceleration_x)
plot(t, list_acceleration_y)
plot(t, list_acceleration_z)
legend(('ddx', 'ddy','ddz'))
xlabel('time seconds')
title ('IMU acceleration')
poppy.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Split the data into two equally sized parts
Step2: Instantiate the classifier
Step3: Train the classifier on the first fold, then predict the labels of the second fold
Step4: Train the classifier on the second fold, then predict the labels of the first fold
Step5: Compute accuracy scores for both folds
Step6: This procedure will yield two accuracy scores, one for the first fold (92% accuracy), and one
Step7: Perform cross-validation with the cross_val_score function. This function
Step8: In order to get a sense how the model did on average, we can look at the mean and
Step9: With five folds, we have a much better idea about how robust the classifier is on average.
Step10: This object can be passed directly to the cross_val_score function in the following way
Step11: Because every test set now contains a single data point, we would expect the scorer to
Step12: If we want to know the average performance of the classifier, we would still compute the
Step13: We can see this scoring scheme returns very similar results to five-fold cross-validation.
Step14: From our dataset with $N$ samples, randomly choose $N$ samples with replacement
Step15: Put all samples that do not show in the bootstrap in the out-of-bag set
Step16: Train the classifier on the bootstrap samples
Step17: Test the classifier on the out-of-bag samples
Step18: Then we want to repeat these steps up to 10,000 times to get 10,000
Step19: To make sure we all get the same result, let's fix the seed of the random number generator
Step20: Now, let's run the procedure for n_iter=10 times by converting the function output to a
Step21: As you can see, for this small sample we get accuracy scores anywhere between 92% and
Step22: You are always welcome to increase the number of repetitions. But once n_iter is large
Step23: Typically, the scores obtained with bootstrapping would be used in a statistical test to
Step24: Let's start with a simple example. Assume we ran five-fold cross-validation on two
Step25: This means that Model A achieved 100% accuracy in all five folds, whereas Model B got 0%
Step26: And we do! We actually get the smallest possible $p$-value, $p=0.0$.
Step27: Analogous to the aforementioned, we get the largest possible $p$-value, $p=1.0$.
Step28: Obtain a set of test scores for Model B. Let's choose Model B to be a $k$-NN
Step29: Apply the $t$-test to both sets of scores
Step31: As you can see, this is a good example of two classifiers giving different cross-validation
Step32: Let's assume the preceding Model A and Model B were applied to the same five data points.
Step33: McNemar's test wants to know two things
Step34: Of course, this applies to all of the data points. The opposite is true for the data points that
Step35: Feeding these numbers to McNemar's test should return a small $p$-value because the two
Step36: And it does!
Step37: The number of data points that one of the classifiers got right but the other got wrong are as
Step38: We got no differences whatsoever! Now it becomes clear why the $t$-test led us to believe
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
import numpy as np
iris = load_iris()
X = iris.data.astype(np.float32)
y = iris.target
from sklearn.model_selection import train_test_split
X_fold1, X_fold2, y_fold1, y_fold2 = train_test_split(
X, y, random_state=37, train_size=0.5
)
import cv2
knn = cv2.ml.KNearest_create()
knn.setDefaultK(1)
knn.train(X_fold1, cv2.ml.ROW_SAMPLE, y_fold1)
_, y_hat_fold2 = knn.predict(X_fold2)
knn.train(X_fold2, cv2.ml.ROW_SAMPLE, y_fold2)
_, y_hat_fold1 = knn.predict(X_fold1)
from sklearn.metrics import accuracy_score
accuracy_score(y_fold1, y_hat_fold1)
accuracy_score(y_fold2, y_hat_fold2)
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=1)
from sklearn.model_selection import cross_val_score
scores = cross_val_score(model, X, y, cv=5)
scores
scores.mean(), scores.std()
from sklearn.model_selection import LeaveOneOut
scores = cross_val_score(model, X, y, cv=LeaveOneOut())
scores
scores.mean(), scores.std()
knn = cv2.ml.KNearest_create()
knn.setDefaultK(1)
idx_boot = np.random.choice(len(X), size=len(X), replace=True)
X_boot = X[idx_boot, :]
y_boot = y[idx_boot]
idx_oob = np.array([x not in idx_boot
for x in np.arange(len(X))], dtype=np.bool)
X_oob = X[idx_oob, :]
y_oob = y[idx_oob]
knn.train(X_boot, cv2.ml.ROW_SAMPLE, y_boot)
_, y_hat = knn.predict(X_oob)
accuracy_score(y_oob, y_hat)
def yield_bootstrap(model, X, y, n_iter=10000):
for _ in range(n_iter):
# train the classifier on bootstrap
idx_boot = np.random.choice(len(X), size=len(X),
replace=True)
X_boot = X[idx_boot, :]
y_boot = y[idx_boot]
model.train(X_boot, cv2.ml.ROW_SAMPLE, y_boot)
# test classifier on out-of-bag examples
idx_oob = np.array([x not in idx_boot
for x in np.arange(len(X))],
dtype=np.bool)
X_oob = X[idx_oob, :]
y_oob = y[idx_oob]
_, y_hat = model.predict(X_oob)
# return accuracy
yield accuracy_score(y_oob, y_hat)
np.random.seed(42)
list(yield_bootstrap(knn, X, y, n_iter=10))
acc = list(yield_bootstrap(knn, X, y, n_iter=1000))
np.mean(acc), np.std(acc)
acc = list(yield_bootstrap(knn, X, y, n_iter=10000))
np.mean(acc), np.std(acc)
from scipy.stats import ttest_ind
scores_a = [1, 1, 1, 1, 1]
scores_b = [0, 0, 0, 0, 0]
ttest_ind(scores_a, scores_b)
scores_a = [0.9, 0.9, 0.9, 0.8, 0.8]
scores_b = [0.8, 0.8, 0.9, 0.9, 0.9]
ttest_ind(scores_a, scores_b)
k1 = KNeighborsClassifier(n_neighbors=1)
scores_k1 = cross_val_score(k1, X, y, cv=10)
np.mean(scores_k1), np.std(scores_k1)
k3 = KNeighborsClassifier(n_neighbors=3)
scores_k3 = cross_val_score(k3, X, y, cv=10)
np.mean(scores_k3), np.std(scores_k3)
ttest_ind(scores_k1, scores_k3)
from scipy.stats import binom
def mcnemar_midp(b, c):
    """Compute McNemar's test using the "mid-p" variant suggested by:

    M.W. Fagerland, S. Lydersen, P. Laake. 2013. The McNemar test for
    binary matched-pairs data: Mid-p and asymptotic are better than exact
    conditional. BMC Medical Research Methodology 13: 91.

    `b` is the number of observations correctly labeled by the first---but
    not the second---system; `c` is the number of observations correctly
    labeled by the second---but not the first---system.
    """
n = b + c
x = min(b, c)
dist = binom(n, .5)
p = 2. * dist.cdf(x)
midp = p - dist.pmf(x)
return midp
scores_a = np.array([1, 1, 1, 1, 1])
scores_b = np.array([0, 0, 0, 0, 0])
a1_b0 = scores_a * (1 - scores_b)
a1_b0
a0_b1 = (1 - scores_a) * scores_b
a0_b1
mcnemar_midp(a1_b0.sum(), a0_b1.sum())
scores_k1 = cross_val_score(k1, X, y, cv=LeaveOneOut())
scores_k3 = cross_val_score(k3, X, y, cv=LeaveOneOut())
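# Count the points that the k=1 classifier got right but the k=3 classifier got wrong, and vice versa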
np.sum(scores_k1 * (1 - scores_k3))
np.sum((1 - scores_k1) * scores_k3)
mcnemar_midp(np.sum(scores_k1 * (1 - scores_k3)),
np.sum((1 - scores_k1) * scores_k3))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Flip
Step2: Blur
Step3: Denoise
Step4: Generic filter
Step5: Blur refactored
Step6: Denoise refactored
Step7: Edges
Step8: Sharpen
|
<ASSISTANT_TASK:>
Python Code:
from PIL import Image
img = Image.open('eye.png')
img = img.convert("L") # grayscale
img # same as display(img)
# define function flip()
# open 'eye.png', convert to grayscale, flip, and display
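# A possible sketch for this exercise (one of several valid answers), using PIL's built-in transpose:
# def flip(img):
#     return img.transpose(Image.FLIP_LEFT_RIGHT)
# flip(Image.open('eye.png').convert("L"))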
# define getpixel, region3x3, avg, and blur functions
img = Image.open('pcb.png')
img = img.convert("L") # make greyscale if not already (luminance)
img
img = blur(img)
img
# define median and denoise functions
img = Image.open('Veggies_noise.jpg')
img = img.convert("L") # make greyscale if not already (luminance)
# denoise 3 times and display
# show 'guesswho.png'
# denoise 3 times then display
# define filter and open functions
# Display 'pcb.png'
img
# use filter to blur the image
img = open('guesswho.png')
img
# using filter function, denoise the image
# define laplace function
# Open 'obama.png' and show the edges
# Show the edges for 'phobos2.jpg'
# define minus function
# display 'bonkers.png'
# sharpen that image and display it
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up
Step2: IS Spectra
Step3: White Noise
Step4: Shaped Noise
Step5: Window Function
Step6: Full ISR Data Creation and Estimator
Step7: Plotting and Normalization of Input Spectra
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import scipy as sp
import scipy.fftpack as scfft
from SimISR.utilFunctions import makesumrule,MakePulseDataRepLPC,spect2acf,acf2spect,CenteredLagProduct
from SimISR.IonoContainer import IonoContainer,MakeTestIonoclass
from ISRSpectrum.ISRSpectrum import ISRSpectrum
import seaborn as sns
# Processings parameters
spfreq=50e3 # Bandwidth
nspec=256 # length of spectrum
rep1=10000 # number of pulses
L=24. # Length of pulse in standard processing
pulse=sp.ones(int(L)) # Pulse for standard processing
pulse_pergram=sp.ones(nspec) # For periodogram
Nrg=128 # Number of Range gates for data
# Parameters for spectrum
species=['O+','e-']
databloc = sp.array([[1.66e10,1e3],[1.66e10,2.5e3]])
f_c = 440e6
# setup seaborne
sns.set_style("whitegrid")
sns.set_context("notebook")
# Make spectrum
ISpec_ion = ISRSpectrum(centerFrequency=f_c, nspec=nspec, sampfreq=spfreq, dFlag=False)
f,cur_spec,rcs = ISpec_ion.getspecsep(databloc,species,rcsflag=True)
specsum = sp.absolute(cur_spec).sum()
cur_spec = len(cur_spec)*cur_spec*rcs/specsum
tau,acf=spect2acf(f,cur_spec)
fig,ax = plt.subplots(1,2,sharey=False, figsize=(8,4),facecolor='w')
rp,imp=ax[0].plot(tau*1e3,acf.real,tau*1e3,acf.imag)
ax[0].legend([rp,imp],['Real','Imag'])
ax[0].set_ylabel('Amplitude')
ax[0].set_title('ACF')
ax[0].set_xlabel(r'$\tau$ in ms')
ax[1].plot(f*1e-3,cur_spec.real)
ax[1].set_ylabel('Amplitude')
ax[1].set_title('Spectrum')
ax[1].set_xlabel(r'f in kHz')
fig.tight_layout()
xin =sp.power(2,-.5)*(sp.random.randn(rep1,nspec)+1j*sp.random.randn(rep1,nspec))
Xfft=sp.power(nspec,-.5)*scfft.fftshift(scfft.fft(xin,axis=-1),axes=-1)
Xperiod=sp.power(Xfft.real,2).mean(0) +sp.power(Xfft.imag,2).mean(0)
tau2,acfperiod=spect2acf(f,Xperiod*nspec)
fig2,ax2 = plt.subplots(1,2,sharey=False, figsize=(8,4),facecolor='w')
rp,imp=ax2[0].plot(tau2*1e6,acfperiod.real,tau2*1e6,acfperiod.imag)
ax2[0].legend([rp,imp],['Real','Imag'])
ax2[0].set_ylabel('Amplitude')
ax2[0].set_title('ACF')
ax2[0].set_xlabel(r'$\tau$ in $\mu$s')
ax2[1].plot(f*1e-3,Xperiod.real)
ax2[1].set_ylabel('Amplitude')
ax2[1].set_title('Spectrum')
ax2[1].set_xlabel(r'f in kHz')
ax2[1].set_ylim([0.,1.5])
fig2.tight_layout()
Xdata = MakePulseDataRepLPC(pulse_pergram,cur_spec,30,rep1,numtype = sp.complex128)
Xfftd=sp.power(nspec,-.5)*scfft.fftshift(scfft.fft(Xdata,axis=-1),axes=-1)
Xperiodd=sp.power(Xfftd.real,2).mean(0) +sp.power(Xfftd.imag,2).mean(0)
tau3,acfperiodd=spect2acf(f,Xperiodd*nspec)
fig3,ax3 = plt.subplots(1,2,sharey=False, figsize=(8,4),facecolor='w')
rp,imp=ax3[0].plot(tau3*1e6,acfperiodd.real,tau3*1e6,acfperiodd.imag)
ax3[0].legend([rp,imp],['Real','Imag'])
ax3[0].set_ylabel('Amplitude')
ax3[0].set_title('ACF')
ax3[0].set_xlabel(r'$\tau$ in $\mu$s')
ax3[1].plot(f*1e-3,Xperiodd.real)
ax3[1].set_ylabel('Amplitude')
ax3[1].set_title('Spectrum')
ax3[1].set_xlabel(r'f in kHz')
fig2.tight_layout()
v=1
l=sp.arange(L)
W=-l**2/(L*v) + (L-v)*l/L/v+1
Wp=sp.pad(W,(int(sp.ceil(float(nspec-L)/2)),int(sp.floor(float(nspec-L)/2))),'constant',constant_values=0)
wfft=scfft.fftshift(scfft.fft(W,n=nspec))
fig4,ax4 = plt.subplots(1,2,sharey=False, figsize=(8,4),facecolor='w')
ax4[0].plot(l,W)
ax4[0].set_ylabel('Weighting')
ax4[0].set_title('Weighting')
ax4[0].set_xlabel(r'$l$')
rp,imp,abp=ax4[1].plot(f*1e-3,wfft.real,f*1e-3,wfft.imag,f*1e-3,sp.absolute(wfft))
ax4[1].legend([rp,imp,abp],['Real','Imag','Abs'])
ax4[1].set_ylabel('Amplitude')
ax4[1].set_title('Spectrum')
ax4[1].set_xlabel(r'f in kHz')
fig4.tight_layout()
Xdata=sp.zeros((rep1,Nrg),dtype=sp.complex128)
Lint = int(L)
for i in range(int(Nrg-Lint)):
Xdata[:,i:i+Lint] = MakePulseDataRepLPC(pulse,cur_spec,40,rep1,numtype = sp.complex128)+Xdata[:,i:i+Lint]
lagsData=CenteredLagProduct(Xdata,numtype=sp.complex128,pulse =pulse,lagtype='centered')/rep1
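# Apply the long-pulse summation rule: each output range gate sums the lag products of the
# neighbouring input range gates that contribute at a given lag (index bounds come from `sumrule`).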
ptype='long'
ts = 1.
sumrule=makesumrule(ptype,L,ts,lagtype='centered')
minrg = -sumrule[0].min()
maxrg = Nrg-sumrule[1].max()
Nrng2 = maxrg-minrg
lagsDatasum = sp.zeros((Nrng2,Lint),dtype=sp.complex128)
for irngnew,irng in enumerate(sp.arange(minrg,maxrg)):
for ilag in range(Lint):
lagsDatasum[irngnew,ilag] = lagsData[irng+sumrule[0,ilag]:irng+sumrule[1,ilag]+1,ilag].sum(axis=0)
# divide off the gain from the pulse stacking
lagsDatasum = lagsDatasum/L
dt=tau[1]-tau[0]
f1,spec_all=acf2spect(l*dt,lagsDatasum,n_s=nspec)
acf_single = lagsDatasum[50]
spec_single = spec_all[50]
# Apply weighting and integrations from gain from pulse stacking
acf_act=scfft.ifftshift(acf)[:Lint]*W
feh,spec_act=acf2spect(l*dt,acf_act,n_s=nspec)
fig5,ax5 = plt.subplots(1,2,sharey=False, figsize=(8,4),facecolor='w')
rp,imp,act_acf=ax5[0].plot(l*dt*1e6,acf_single.real,l*dt*1e6,acf_single.imag,l*dt*1e6,acf_act.real)
ax5[0].legend([rp,imp,act_acf],['Real','Imag','Actual'])
ax5[0].set_ylabel('Amplitude')
ax5[0].set_title('ACF')
ax5[0].set_xlabel(r'$\tau$ in $\mu$s')
est1,act_spec=ax5[1].plot(f*1e-3,spec_single.real,f*1e-3,spec_act.real)
ax5[1].legend([est1,act_spec],['Estimated','Actual'])
ax5[1].set_ylabel('Amplitude')
ax5[1].set_title('Spectrum')
ax5[1].set_xlabel(r'f in kHz')
fig5.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import normalize
import seaborn as sns
import pandas as pd
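# `target`, `pred`, `index` and `columns` are assumed to be defined by the caller:
# the true labels, the predicted labels, and the class names used for the DataFrame axes.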
cm = confusion_matrix(target, pred)
normed_confusion_matrix = normalize(cm, axis = 1, norm = 'l1')
cm_df = pd.DataFrame(normed_confusion_matrix,index, columns)
sns.heatmap(cm_df, annot=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The Euro problem
Step4: If we know the coin is fair, we can evaluate the likelihood of the data directly.
Step5: If we cheat and pretend that the alternative hypothesis is exactly the observed proportion, we can compute the likelihood of the data and the likelihood ratio, relative to the fair coin.
Step6: Under this interpretation, the data are in favor of "biased", with K=6. But that's a total cheat.
Step8: Under this interpretation, the data are in favor of "biased", but very weak.
Step9: Here's what it looks like if "biased" means "equally likely to be any value between 0 and 1".
Step11: By that definition, the data are evidence against the biased hypothesis, with K=2.
Step12: Here's what it looks like
Step13: By the triangle definition of "biased", the data are very weakly in favor of "fair".
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import math
import numpy as np
from thinkbayes2 import Pmf, Cdf, Suite, Joint
import thinkplot
class Euro(Suite):
    """Represents hypotheses about the probability of heads."""
def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under the hypothesis.

        hypo: integer value of x, the probability of heads (0-100)
        data: tuple of (number of heads, number of tails)
        """
x = hypo / 100.0
heads, tails = data
like = x**heads * (1-x)**tails
return like
data = 140, 110
suite = Euro()
like_f = suite.Likelihood(data, 50)
print('p(D|F)', like_f)
actual_percent = 100.0 * 140 / 250
likelihood = suite.Likelihood(data, actual_percent)
print('p(D|B_cheat)', likelihood)
print('p(D|B_cheat) / p(D|F)', likelihood / like_f)
like40 = suite.Likelihood(data, 40)
like60 = suite.Likelihood(data, 60)
likelihood = 0.5 * like40 + 0.5 * like60
print('p(D|B_two)', likelihood)
print('p(D|B_two) / p(D|F)', likelihood / like_f)
def SuiteLikelihood(suite, data):
    """Computes the weighted average of likelihoods for sub-hypotheses.

    suite: Suite that maps sub-hypotheses to probability
    data: some representation of the data

    returns: float likelihood
    """
total = 0
for hypo, prob in suite.Items():
like = suite.Likelihood(data, hypo)
total += prob * like
return total
b_uniform = Euro(range(0, 101))
b_uniform.Remove(50)
b_uniform.Normalize()
likelihood = SuiteLikelihood(b_uniform, data)
print('p(D|B_uniform)', likelihood)
print('p(D|B_uniform) / p(D|F)', likelihood / like_f)
def TrianglePrior():
    """Makes a Suite with a triangular prior."""
suite = Euro()
for x in range(0, 51):
suite.Set(x, x)
for x in range(51, 101):
suite.Set(x, 100-x)
suite.Normalize()
return suite
b_tri = TrianglePrior()
b_tri.Remove(50)
b_tri.Normalize()
likelihood = b_tri.Update(data)
print('p(D|B_tri)', likelihood)
print('p(D|B_tri) / p(D|F)', likelihood / like_f)
likelihood = SuiteLikelihood(b_uniform, data)
likelihood
euro = Euro(b_uniform)
euro.Update(data)
likelihood = SuiteLikelihood(b_tri, data)
likelihood
euro = Euro(b_tri)
euro.Update(data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Intro to Python OOP
Step2: Example
Step3: Example
Step4: There's a few things here which I haven't introduced, but all will become clear in the remainder of this workshop.
Step5: Classes
Step7: Instance attributes and Class attributes
Step8: Example
Step9: Class vs Instance attributes
Step10: Exercise
Step11: Example
Step12: Note
Step13: Classes
Step14: Example
Step15: Notes
Step17: Magic Methods
Step18: Note
|
<ASSISTANT_TASK:>
Python Code:
# Run this cell before trying examples
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Numpy arrays are classes
import numpy as np
a = np.array([0, 1, 6, 8, 12])
print(a.__class__)
print(type(a))
# We want to operate on the array: try numpy cumulative sum function
print(np.cumsum(a))
# np.cumsum('helloworld') # Should we expect this to work?
# cumsum is a method belonging to a
a.cumsum()
class Greeter(object):
def hello(self): # Method (more on 'self' later)
print("Hello World")
agreeter = Greeter() # 'Instantiate' the class
print(agreeter)
# agreeter. # Tab complete?
# Note that we don't pass an argument to hello!
agreeter.hello()
class A(object):
def __init__(self):
print("Hello")
a_instance = A()
print(type(a_instance))
class Container(object):
    """Simple container which stores an array as an instance attribute
    and an instance method"""
def __init__(self, N):
self.data = np.linspace(0, 1, N)
def plot(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.data, 'bx')
mydata = Container(11) # 11 ia passed as 'N' to __init__
print(mydata.__dict__) # __dict__ is where the attr: value
# pairs are stored!
mydata.plot()
class Container(object):
data = np.linspace(0, 1, 5) # class attribute
def __init__(self):
pass
a, b = Container(), Container()
print(a.data)
print(b.data)
a.data = 0 # Creates INSTANCE attribute
Container.data = 100 # Overwrites CLASS attribute
print(a.data)
print(b.data)
class Container(object):
def __init__(self, N):
self.data = np.linspace(0, 1, N)
def print_data(self):
print(self.data)
a = Container(11)
a.print_data() # <<< This is better
Container.print_data(a)
class Fruit(object):
def __init__(self):
self._hasjuice = True
def juice(self):
if not self.isfull(): raise ValueError('No juice!')
self._hasjuice = False
def isfull(self):
return self._hasjuice
orange = Fruit()
print(orange.isfull())
orange.juice()
print(orange.isfull())
# orange._ # tab completion behaviour?
# orange._ # tab completion behaviour now?
orange._hasjuice = True # bad!
orange.isfull()
class Fruit(object):
def __init__(self):
self.__hasjuice = True
def juice(self):
if not self.isfull(): raise ValueError('No juice!')
self.__hasjuice = False
def isfull(self):
return self.__hasjuice
apple = Fruit()
# apple._ # tab completion behaviour?
apple.juice()
apple._Fruit__hasjuice = False # Definitely bad!
apple.isfull()
# Live coding Bay....
class Parent(object):
# Note the base __init__ is overridden in
# Child class
def __init__(self):
pass
def double(self):
return self.data*2
class Child(Parent):
def __init__(self, data):
self.data = data
achild = Child(np.array([0, 1, 5, 10]))
achild.double()
class Plottable(object):
def __init__(self, data):
self.data = data
def plot(self, ax):
ax.plot(self.data)
class SinWave(Plottable):
def __init__(self):
super().__init__(
np.sin(np.linspace(0, np.pi*2, 101)))
class CosWave(Plottable):
def __init__(self):
super().__init__(
np.cos(np.linspace(0, np.pi*2, 101)))
fig = plt.figure()
ax = fig.add_subplot(111)
mysin = SinWave(); mycos = CosWave()
mysin.plot(ax); mycos.plot(ax)
dir(object)
class Wave(object):
def __init__(self, freq):
self.freq = freq
self._data = np.sin(np.linspace(0, np.pi, 101)
* np.pi*2 * freq)
def __str__(self):
        """RETURNS the string for printing"""
return "Wave frequency: {}".format(self.freq)
def __lt__(self, wave2):
return self.freq < wave2.freq
wav_low = Wave(10)
wav_high = Wave(50) # A high frequency wave
print(wav_high)
wav_low < wav_high
# Live coding Bay....
class OldSyntax:
pass
class NewSyntax(object): # This means 'inherit from object'
pass
print(type(OldSyntax)) # Would give <type 'classobj'>
# in Python 2
print(type(NewSyntax))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Decision trees are directed graphs beginning with one node and branching to many. They are a hierarchical data structure that represents data by implementing a divide-and-conquer strategy. There are two main types of decision tree
Step2: Now, predict the class of some example collection of features.
Step3: The probability of each class can be predicted too, which is the fraction of training samples of the same class in a leaf.
Step4: We can look at the tree in Graphviz format.
Step5: more detailed example of decision tree classifier using the iris dataset
Step6: The top bit of the dataset looks like this
Step7: Make a decision tree and then fit it using the features ("data") and class labels ("target") of the iris dataset.
Step8: Ok, let's look at the tree, but we'll fancy it up this time with colors and shit.
Step9: Right, so now let's make some predictions.
Step10: How accurate is it? Well, here is what it should have got
Step11: Boom, it's awesome. Well done, decision tree.
Step12: Aait, let's create and fit a decision tree with a depth of like 2 nodes.
Step13: Ok, let's make some predictions and see how it does.
Step14: Damn, that shit is woke!
Step15: Ok, now let's try a tree with greater depth, like 5 nodes.
Step16: Yeah ok, naw.
|
<ASSISTANT_TASK:>
Python Code:
import graphviz
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.datasets
import sklearn.tree
plt.rcParams["figure.figsize"] = [17, 10]
# features
X = [
[0, 0],
[1, 1]
]
# targets
Y = [
0,
1
]
classifier = sklearn.tree.DecisionTreeClassifier()
classifier = classifier.fit(X, Y)
classifier.predict([[2, 2]])
classifier.predict_proba([[2, 2]])
graph = graphviz.Source(sklearn.tree.export_graphviz(classifier, out_file=None))
graph;
iris = sklearn.datasets.load_iris()
pd.DataFrame(
data = np.c_[iris["data"], iris["target"]],
columns = iris["feature_names"] + ["target"]
).head()
classifier = sklearn.tree.DecisionTreeClassifier()
classifier = classifier.fit(iris.data, iris.target)
graph = graphviz.Source(
sklearn.tree.export_graphviz(
classifier,
out_file = None,
feature_names = iris.feature_names,
class_names = iris.target_names,
filled = True,
rounded = False,
special_characters = True,
proportion = True,
)
)
graph.render('iris_DT')
graph
sklearn.tree.export_graphviz(
classifier,
out_file = "tree_1.svg",
feature_names = iris.feature_names,
class_names = iris.target_names,
filled = True,
rounded = False,
special_characters = True,
proportion = True,
)
classifier.predict(iris.data)
iris.target
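# Added sketch (not in the original notebook): one way to put a number on
# "how accurate is it?" is to compare the predictions with the labels directly.
# Note this is accuracy on the training data, which a full-depth tree fits almost perfectly.
print((classifier.predict(iris.data) == iris.target).mean())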
rng = np.random.RandomState(1)
X = np.sort(5*rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3*(0.5-rng.rand(16))
plt.scatter(X, y, s=30, edgecolor="black", c="red", label="data")
plt.title("a fuck off noisy sine curve")
plt.xlabel("data")
plt.ylabel("target")
plt.show();
regressor = sklearn.tree.DecisionTreeRegressor(max_depth=2)
regressor.fit(X, y);
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_prediction = regressor.predict(X_test)
plt.scatter(X, y, s=30, edgecolor="black", c = "red", label="data")
plt.plot(X_test, y_prediction, color="cornflowerblue", label="max_depth = 2", linewidth=2)
plt.title("just fittin' a noisy sine curve, it's fine")
plt.xlabel("data")
plt.ylabel("target")
plt.legend()
plt.show();
graph = graphviz.Source(
sklearn.tree.export_graphviz(
regressor,
out_file = None,
filled = True,
rounded = False
)
)
graph;
regressor = sklearn.tree.DecisionTreeRegressor(max_depth=5)
regressor.fit(X, y);
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_prediction = regressor.predict(X_test)
plt.scatter(X, y, s=30, edgecolor="black", c="red", label="data")
plt.plot(X_test, y_prediction, color="cornflowerblue", label="max_depth = 5", linewidth=2)
plt.title("just fittin' a noisy sine curve, but what the Bjork?")
plt.xlabel("data")
plt.ylabel("target")
plt.legend()
plt.show();
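# Added sketch (not in the original notebook): the overfitting the text points at
# can be quantified by comparing training error for the two depths used above.
for depth in (2, 5):
    r = sklearn.tree.DecisionTreeRegressor(max_depth=depth).fit(X, y)
    print(depth, np.mean((r.predict(X) - y) ** 2))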
graph = graphviz.Source(
sklearn.tree.export_graphviz(
regressor,
out_file = None,
filled = True,
rounded = False,
special_characters = True,
proportion = True,
)
)
graph.render('iris_DT')
graph
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You might have to restart your runtime to load these packages.
Step2: Next, create a new GCP account (if you don't have one already), and create a new project.
Step3: Fill in your info below by specifying your project id. You'll also need to choose a bucket name (that should start with gs://).
Step4: Using the Video Intelligence API
Step6: Now let's run the Video Intelligence API's Person Detection feature on the uploaded video. We pass this function the input path to our file in cloud storage as well as an output path where we'd like the results to be written.
Step7: We've called an asynchronous function here--detect_person--because long videos can take a while to analyze. You can check the status of the analysis by calling operation.done().
Step8: Note that even if you restart this notebook, the Video Intelligence API will still be analyzing your video in the cloud! So you won't lose any progress.
Step9: Formatting the Data
Step10: These json files are usually pretty big, so don't print them! Instead, let's just inspect the structure
Step11: It's easy to get lost in all these nested fields! What we really want is the data stored in data['annotation_results'][0]['person_detection_annotations']. Let's grab it
Step12: In people_annotations, every entry corresponds to a person and each person has a unique set of tracks, or tracked segments. We'll use a helper function to parse through the data and rearrange it to make it easier to use for our analyses
Step13: We'll also store the data in a pandas DataFrame (also for convenience), and sort each data point by timestamp
Step14: Phew! The hard bit (parsing the data) is over. Now we can take a look at the results!
Step15: As you can see above, we've organized the data by the position of each body part by timestamp. Note that this works because they're actually only one person in my video--me!
Step16: From the plot above, you can actually identify the time of my serve pretty easily! First, I throw the tennis ball up with my left hand (peak in left wrist). Then, a few seconds later, I hit the ball with my racket (peak in right wrist y).
Step 1
Step17: To compute the angle made by three points, we use the Law of Cosines. Did you forget about this? I did! Imagine a triangle with side lengths a, b, and c. Then, to find 𝛾 (the angle across from side c), the formula is c^2 = a^2 + b^2 - 2ab*cos(𝛾), which rearranges to 𝛾 = arccos((a^2 + b^2 - c^2) / (2ab)).
Step18: Let's compute some useful angles below
Step19: Sweet! Now let's plot those angles over time.
Step20: Now let's plot the results!
Step21: Now, these angles might not be very useful on their own. But when we combine them with position data, we can tell what the angle of my arm was at the height of my serve. In particular, let's take a look at the angle of my elbow and shoulder when my right wrist was at the highest point in the serve.
Step22: These charts might be difficult to read, but they tell me that when my arm is most extended, the angle of my elbow is about a 200 degree angle.
Step23: Next, we'll use a command line tool called ffmpeg to convert the video into photos, 20 photos per second.
Step24: Below, I use the ffmpeg command to generate snapshots from my video at 20 frames per second. I take a 2 second segment (-t 00:00:02) starting one second in (-ss 00:00:01).
Step25: Now let's analyze those snapshots. Grab your AutoML model id
Step26: Now that we're able to track the ball, let's make a pretty image so we can see what's actually going on
Step27: The code above analyzes the snapshots and creates a video and a gif you can check out in the files ball_tracking.mp4 and ball_tracking.gif respectively.
Step28: Here we can plot the ball in space, and see how it leaves my hand and then flies across the court.
Step29: To determine the speed, let's look at the distance the ball travels over time
Step30: You can see that 0.5 to 0.7 seconds is when the ball has been hit and is traveling across the court. So, to compute the speed, let's divide distance by time!
|
<ASSISTANT_TASK:>
Python Code:
!pip install google-cloud-automl
!apt-get install libmagickwand-dev
!pip install pillow
!pip install --upgrade protobuf
!pip install --upgrade google-cloud-videointelligence
import sys
import os
import json
import math
from google.colab import auth
from google.colab import files
import pandas as pd
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
import numpy as np
from google.cloud import automl
from google.cloud import videointelligence_v1p3beta1 as videointelligence
from google.oauth2 import service_account
auth.authenticate_user()
# TODO: REMOVE MY SPECIFIC CONFIG
project_id = 'YOUR_PROJECT_ID' #@param {type: "string"}
bucket = 'gs://YOUR_BUCKET' #@param {type: "string"}
service_account_name="ANY_RANDOM_NAME" #@param {type: "string"}
!gcloud config set project {project_id}
!gsutil mb {bucket}
!gcloud iam service-accounts create {service_account_name}
!gcloud iam service-accounts keys create ./key.json --iam-account {service_account_name}@{project_id}.iam.gserviceaccount.com
# Enable the Video Intelligence API and AutoML
!gcloud services enable videointelligence.googleapis.com
!gcloud services enable automl.googleapis.com
# Give your service account permission to access the API
!gcloud projects add-iam-policy-binding {project_id} --member="serviceAccount:{service_account_name}@{project_id}.iam.gserviceaccount.com" --role="roles/editor"
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "./key.json"
file_to_analyze = 'YOUR_SPORTS_VIDEO.mp4' #@param {type: "string"}
# Verify that you see that file listed here...
# This cell should print the name of the file you just uploaded
!gsutil ls {bucket}/{file_to_analyze}
input_uri = os.path.join(bucket, file_to_analyze)
output_uri = os.path.join(bucket, 'output.json')
# This function comes from the docs
# https://cloud.google.com/video-intelligence/docs/people-detection
def detect_person(input_uri, output_uri):
    """Detects people in a video."""
client = videointelligence.VideoIntelligenceServiceClient(credentials=service_account.Credentials.from_service_account_file(
'./key.json'))
# Configure the request
config = videointelligence.types.PersonDetectionConfig(
include_bounding_boxes=True,
include_attributes=True,
include_pose_landmarks=True,
)
context = videointelligence.types.VideoContext(person_detection_config=config)
# Start the asynchronous request
operation = client.annotate_video(
input_uri=input_uri,
output_uri=output_uri,
features=[videointelligence.enums.Feature.PERSON_DETECTION],
video_context=context,
)
return operation
# If you get a permissions error here, you might have to modify the permissions
# on your bucket to allow your service account to access it. Do that in the
# GCP storage console/UI.
operation = detect_person(input_uri, output_uri)
print(f"Operation ${operation.operation.name} is done? {operation.done()}")
# Note! This won't work unless operation.done() == True!
!mkdir tmp
!gsutil cp {output_uri} tmp
data = json.load(open('./tmp/output.json'))
print(data.keys())
# We only care about annotation_results[0] because we only have one video
print(len(data['annotation_results'][0]['person_detection_annotations']))
people_annotations = data['annotation_results'][0]['person_detection_annotations']
'''
This helper function takes in a person and rearranges the data so it's in
a timeline, which will make it easier for us to work with
'''
def analyzePerson(person):
frames = []
for track in person['tracks']:
# Convert timestamps to seconds
for ts_obj in track['timestamped_objects']:
time_offset = ts_obj['time_offset']
timestamp = 0
if 'nanos' in time_offset:
timestamp += time_offset['nanos'] / 10**9
if 'seconds' in time_offset:
timestamp += time_offset['seconds']
if 'minutes' in time_offset:
timestamp += time_offset['minutes'] * 60
frame= {'timestamp' : timestamp}
for landmark in ts_obj['landmarks']:
frame[landmark['name'] + '_x'] = landmark['point']['x']
# Subtract y value from 1 because positions are calculated
# from the top left corner
frame[landmark['name'] + '_y'] = 1 - landmark['point']['y']
frames.append(frame)
sorted(frames, key=lambda x: x['timestamp'])
return frames
annotationsPd = pd.DataFrame(analyzePerson(people_annotations[0]))
for annotation in people_annotations[1:]:
annotationsPd = annotationsPd.append(pd.DataFrame(analyzePerson(annotation)))
annotationsPd = annotationsPd.sort_values('timestamp', ascending=True)
annotationsPd.head()
plt.figure()
annotationsPd.plot('timestamp', ['left_wrist_y', 'right_wrist_y'], figsize=(20, 5))
plt.title("Left and Right Wrist Positions Over Time")
plt.savefig("wrist_pos")
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def getAngle(a, b, c):
ang = math.degrees(math.atan2(c.y-b.y, c.x-b.x) - math.atan2(a.y-b.y, a.x-b.x))
return ang
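# Added sketch (hypothetical alternative, not used below): the text mentions the
# Law of Cosines, while getAngle above uses atan2 on the two direction vectors.
# A Law-of-Cosines version of the (unsigned) vertex angle at b would be:
def getAngleLawOfCosines(a, b, c):
    ba = math.hypot(a.x - b.x, a.y - b.y)
    bc = math.hypot(c.x - b.x, c.y - b.y)
    ac = math.hypot(a.x - c.x, a.y - c.y)
    return math.degrees(math.acos((ba**2 + bc**2 - ac**2) / (2 * ba * bc)))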
def computeElbowAngle(row, which='right'):
wrist = Point(row[f'{which}_wrist_x'], row[f'{which}_wrist_y'])
elbow = Point(row[f'{which}_elbow_x'], row[f'{which}_elbow_y'])
shoulder = Point(row[f'{which}_shoulder_x'], row[f'{which}_shoulder_y'])
return getAngle(wrist, elbow, shoulder)
def computeShoulderAngle(row, which='right'):
elbow = Point(row[f'{which}_elbow_x'], row[f'{which}_elbow_y'])
shoulder = Point(row[f'{which}_shoulder_x'], row[f'{which}_shoulder_y'])
hip = Point(row[f'{which}_hip_x'], row[f'{which}_hip_y'])
return getAngle(hip, shoulder, elbow)
def computeKneeAngle(row, which='right'):
hip = Point(row[f'{which}_hip_x'], row[f'{which}_hip_y'])
knee = Point(row[f'{which}_knee_x'], row[f'{which}_knee_y'])
ankle = Point(row[f'{which}_ankle_x'], row[f'{which}_ankle_y'])
return getAngle(ankle, knee, hip)
# For a single timeslot...
row = annotationsPd.iloc[-1]
print("Elbow angle: " + str(computeElbowAngle(row)))
print("Shoulder angle: " + str(computeShoulderAngle(row)))
print("Knee angle: " + str(computeKneeAngle(row)))
annotationsPd['right_elbow_angle'] = annotationsPd.apply(computeElbowAngle, axis=1)
annotationsPd['right_shoulder_angle'] = annotationsPd.apply(computeShoulderAngle, axis=1)
annotationsPd['right_knee_angle'] = annotationsPd.apply(computeKneeAngle, axis=1)
plt.figure()
annotationsPd.plot('timestamp', ['right_elbow_angle'], figsize=(20, 5), color='blue')
plt.title("Right Elbow Angle over Time")
plt.savefig("right_elbow_angle")
annotationsPd.plot('timestamp', ['right_shoulder_angle'], figsize=(20, 5), color='purple')
plt.title("Right Shoulder Angle over Time")
plt.savefig("right_shoulder_angle")
annotationsPd.plot('timestamp', ['right_knee_angle'], figsize=(20, 5))
plt.title("Right Knee Angle over Time")
plt.savefig("right_knee_angle")
fig = plt.figure()
ax=fig.add_subplot(111, label="1")
annotationsPd.plot('timestamp', ['right_wrist_y'], figsize=(20, 5), ax=ax, color='red')
plt.title("Right Elbow Angle over Time")
ax2=fig.add_subplot(111, label="2", frame_on=False)
annotationsPd.plot('timestamp', ['right_elbow_angle'], figsize=(20, 5), ax=ax2)
#annotationsPd.plot.scatter('right_wrist_y', 'right_elbow_angle')
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
filename, _ = uploaded.popitem()
!mkdir tmp/snapshots
!ffmpeg -i {filename} -vf fps=20 -ss 00:00:01 -t 00:00:02 tmp/snapshots/%03d.jpg
model_id = 'IOD6154100721080860672' #@param {type: "string"}
def getAutoMLPrediction(filename):
with open(filename, 'rb') as ff:
content = ff.read()
prediction_client = automl.PredictionServiceClient()
name = 'projects/{}/locations/us-central1/models/{}'.format(project_id, model_id)
params = {"score_threshold": "0.7"} # this metric changes the sensitivity of your model
image = automl.types.Image(image_bytes=content)
payload = automl.types.ExamplePayload(image=image)
return prediction_client.predict(name, payload, params)
def getBallsCoords(filename):
res = getAutoMLPrediction(filename)
return [obj.image_object_detection.bounding_box.normalized_vertices for obj in res.payload]
snapshotFiles = os.listdir('tmp/snapshots')
snapshotFiles.sort()
print(f"Analyzing {len(snapshotFiles)} images")
def makeBallImage(filename, coords):
im = Image.open(filename)
im.thumbnail((im.width * 0.2, im.height * 0.2))
draw = ImageDraw.Draw(im)
for coord in coords:
        draw.rectangle([(coord[0].x * im.width, coord[0].y * im.height), (coord[1].x * im.width, coord[1].y * im.height)])
return im
# Call the AutoML API--this could take a while!
coords = [getBallsCoords('tmp/snapshots/' + filename) for filename in snapshotFiles if 'jpg' in filename]
imgs = [makeBallImage('tmp/snapshots/' + filename, coord) for filename, coord in zip(snapshotFiles, coords) if 'jpg' in filename]
!mkdir snapshot_annotated
for idx, im in enumerate(imgs):
plt.imshow(np.asarray(im))
plt.savefig('snapshot_annotated/file%d.png' % idx)
# Create a cute video of your seves!
!ffmpeg -framerate 20 -i snapshot_annotated/file%01d.png -vcodec mpeg4 -y ball_tracking.mp4
!ffmpeg -i ball_tracking.mp4 ball_tracking.gif
# For simplicity, we'll just plot the bottom left corner of the bounding box
# around the ball
coord_x = [ball[0].x for frame in coords for ball in frame]
coord_y = [1 - ball[0].y for frame in coords for ball in frame]
timestamps = [x/20 for x in range(len(coord_x))] # 20 frames per second
plt.title("Position of tennis ball during serve")
plt.xlabel("X Position")
plt.ylabel("Y Position")
plt.scatter(coord_x, coord_y)
plt.savefig("serve_position_x_y.png")
plt.title("Y position of tennis ball during serve over time")
plt.xlabel("seconds")
plt.ylabel("Y position")
plt.scatter(timestamps, coord_y)
plt.savefig("ball_position_over_time.png")
# Get the first data point from 0.5 seconds
start_x = coord_x[timestamps.index(0.5)]
end_x = coord_x[-1]
start_y = coord_y[timestamps.index(0.5)]
end_y = coord_y[-1]
# Compute the Euclidean distance
distance = math.sqrt((start_x - end_x)**2 + (start_y - end_y)**2)
time = timestamps[-1] - 0.5
print(f"The speed of your serve was {distance/time}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How to solve a problem on Kaggle with TF-Hub
Step2: Since this tutorial uses a dataset from Kaggle, you need to create an API token for your Kaggle account and upload it to the Colab environment.
Step3: Getting started
Step4: Note: the task in this competition is not to score whole reviews, but to score the individual phrases within a review, which is a harder task.
Step5: Training the model
Step6: Prediction
Step7: Confusion matrix
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
!pip install -q kaggle
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import zipfile
from sklearn import model_selection
import os
import pathlib
# Upload the API token.
def get_kaggle():
try:
import kaggle
return kaggle
except OSError:
pass
token_file = pathlib.Path("~/.kaggle/kaggle.json").expanduser()
token_file.parent.mkdir(exist_ok=True, parents=True)
try:
from google.colab import files
except ImportError:
raise ValueError("Could not find kaggle token.")
uploaded = files.upload()
token_content = uploaded.get('kaggle.json', None)
if token_content:
token_file.write_bytes(token_content)
token_file.chmod(0o600)
else:
raise ValueError('Need a file named "kaggle.json"')
import kaggle
return kaggle
kaggle = get_kaggle()
SENTIMENT_LABELS = [
"negative", "somewhat negative", "neutral", "somewhat positive", "positive"
]
# Add a column with readable values representing the sentiment.
def add_readable_labels_column(df, sentiment_value_column):
df["SentimentLabel"] = df[sentiment_value_column].replace(
range(5), SENTIMENT_LABELS)
# Download data from Kaggle and create a DataFrame.
def load_data_from_zip(path):
with zipfile.ZipFile(path, "r") as zip_ref:
name = zip_ref.namelist()[0]
with zip_ref.open(name) as zf:
return pd.read_csv(zf, sep="\t", index_col=0)
# The data does not come with a validation set so we'll create one from the
# training set.
def get_data(competition, train_file, test_file, validation_set_ratio=0.1):
data_path = pathlib.Path("data")
kaggle.api.competition_download_files(competition, data_path)
competition_path = (data_path/competition)
competition_path.mkdir(exist_ok=True, parents=True)
competition_zip_path = competition_path.with_suffix(".zip")
with zipfile.ZipFile(competition_zip_path, "r") as zip_ref:
zip_ref.extractall(competition_path)
train_df = load_data_from_zip(competition_path/train_file)
test_df = load_data_from_zip(competition_path/test_file)
# Add a human readable label.
add_readable_labels_column(train_df, "Sentiment")
# We split by sentence ids, because we don't want to have phrases belonging
# to the same sentence in both training and validation set.
train_indices, validation_indices = model_selection.train_test_split(
np.unique(train_df["SentenceId"]),
test_size=validation_set_ratio,
random_state=0)
validation_df = train_df[train_df["SentenceId"].isin(validation_indices)]
train_df = train_df[train_df["SentenceId"].isin(train_indices)]
print("Split the training data into %d training and %d validation examples." %
(len(train_df), len(validation_df)))
return train_df, validation_df, test_df
train_df, validation_df, test_df = get_data(
"sentiment-analysis-on-movie-reviews",
"train.tsv.zip", "test.tsv.zip")
train_df.head(20)
class MyModel(tf.keras.Model):
def __init__(self, hub_url):
super().__init__()
self.hub_url = hub_url
self.embed = hub.load(self.hub_url).signatures['default']
self.sequential = tf.keras.Sequential([
tf.keras.layers.Dense(500),
tf.keras.layers.Dense(100),
tf.keras.layers.Dense(5),
])
def call(self, inputs):
phrases = inputs['Phrase'][:,0]
embedding = 5*self.embed(phrases)['default']
return self.sequential(embedding)
def get_config(self):
return {"hub_url":self.hub_url}
model = MyModel("https://tfhub.dev/google/nnlm-en-dim128/1")
model.compile(
loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.optimizers.Adam(),
metrics = [tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")])
history = model.fit(x=dict(train_df), y=train_df['Sentiment'],
validation_data=(dict(validation_df), validation_df['Sentiment']),
epochs = 25)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
train_eval_result = model.evaluate(dict(train_df), train_df['Sentiment'])
validation_eval_result = model.evaluate(dict(validation_df), validation_df['Sentiment'])
print(f"Training set accuracy: {train_eval_result[1]}")
print(f"Validation set accuracy: {validation_eval_result[1]}")
predictions = model.predict(dict(validation_df))
predictions = tf.argmax(predictions, axis=-1)
predictions
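# Added sketch (not in the original notebook): mapping the predicted class ids
# back to readable labels for the first few validation phrases.
for phrase, p in zip(validation_df['Phrase'].head(5), predictions.numpy()[:5]):
    print(SENTIMENT_LABELS[p], '-', phrase)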
cm = tf.math.confusion_matrix(validation_df['Sentiment'], predictions)
cm = cm/cm.numpy().sum(axis=1)[:, tf.newaxis]
sns.heatmap(
cm, annot=True,
xticklabels=SENTIMENT_LABELS,
yticklabels=SENTIMENT_LABELS)
plt.xlabel("Predicted")
plt.ylabel("True")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data structure
Step2: VGG16() setup boilerplate
Step3: Load in data with generators
Step4: Finetuning the model
Step5: New model architecture
Step6: Batch normalisation
Step7: Data augmentation
Step8: Conv stack output
Step9: Test convolutions
Step10: Save everything to disk
Step11: Train fully connected layers only
Step12: Use data to train model
Step13: Load weights from trained model, and generate predictions
Step14: Convert to proper CSV
|
<ASSISTANT_TASK:>
Python Code:
import os
import zipfile
import shutil
import csv
import bcolz
os.environ["KERAS_BACKEND"] = "theano"
import keras
import numpy as np
from keras.utils.data_utils import get_file
from keras.models import load_model
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, Dropout, Flatten, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
model_url = "http://files.fast.ai/models/"
model_name = "vgg16.h5"
cache_dir = "models"
raw_path = os.path.join(os.getcwd(), os.pardir, 'data', 'raw')
processed_path = os.path.join(os.getcwd(), os.pardir, 'data', 'processed')
# Make directories sample, valid, train, test, first check if this whole step is necessary
if os.path.exists(os.path.join(processed_path, 'sample')):
print 'Sample directory already exists, no need to do data structuring!'
else:
os.mkdir(os.path.join(processed_path, 'sample'))
os.mkdir(os.path.join(processed_path, 'sample', 'train'))
os.mkdir(os.path.join(processed_path, 'sample', 'valid'))
os.mkdir(os.path.join(processed_path, 'valid'))
# Extract Kaggle zipfiles to correct path
print 'Extracting zips, this may take a while...'
img_zip_handle = zipfile.ZipFile(os.path.join(raw_path, 'imgs.zip'), 'r')
img_zip_handle.extractall(processed_path)
img_zip_handle.close()
csv_zip_handle = zipfile.ZipFile(os.path.join(raw_path, 'driver_imgs_list.csv.zip'), 'r')
csv_zip_handle.extractall(processed_path)
csv_zip_handle.close()
print 'Done extracting zips!'
# Set up sample directory structure
for i in range(10):
dirname = 'c' + str(i)
os.mkdir(os.path.join(processed_path, 'sample', 'train', dirname))
os.mkdir(os.path.join(processed_path, 'sample', 'valid', dirname))
os.mkdir(os.path.join(processed_path, 'valid', dirname))
os.mkdir(os.path.join(processed_path, 'test', 'unknown'))
for filename in os.listdir(os.path.join(processed_path, 'test')):
if filename.endswith('.jpg'):
src = os.path.join(processed_path, 'test', filename)
dest = os.path.join(processed_path, 'test', 'unknown', filename)
shutil.move(src, dest)
data = np.genfromtxt(os.path.join(processed_path, 'driver_imgs_list.csv'), delimiter=',', dtype=None)
data = data[1:,:]
drivers = np.unique(data[:,0])
num_drivers = drivers.shape[0]
# Throw 15% of train data into sample folder
sample_drivers_amount = int(np.floor(num_drivers*0.15))
sample_drivers = np.random.choice(drivers, sample_drivers_amount, replace=False)
# Throw 20% of train data into valid folder
validation_drivers_amount = int(np.floor(num_drivers*0.2))
validation_drivers = np.random.choice(drivers, validation_drivers_amount, replace=False)
# Set up sample set
for i in range(sample_drivers_amount):
driver_name = sample_drivers[i]
driver_columns = data[data[:,0] == driver_name]
for j in range(10):
driver_class = 'c' + str(j)
dest = os.path.join(processed_path, 'sample', 'train', driver_class)
class_columns = driver_columns[driver_columns[:,1] == driver_class]
for filename in class_columns[:,2]:
src = os.path.join(processed_path, 'train', driver_class, filename)
shutil.copyfile(src, os.path.join(dest, filename))
# Now move from sample_train to sample_validation a fraction of ~40%
sample_drivers_validation_amount = int(np.floor(sample_drivers_amount*0.4))
sample_drivers_validation = np.random.choice(sample_drivers,
sample_drivers_validation_amount,
replace=False)
for i in range(sample_drivers_validation_amount):
driver_name = sample_drivers_validation[i]
driver_columns = data[data[:,0] == driver_name]
for j in range(10):
driver_class = 'c' + str(j)
class_columns = driver_columns[driver_columns[:,1] == driver_class]
for filename in class_columns[:,2]:
dest = os.path.join(processed_path, 'sample', 'valid', driver_class, filename)
src = os.path.join(processed_path, 'sample', 'train', driver_class, filename)
shutil.move(src, dest)
# Set up validation set
for i in range(validation_drivers_amount):
driver_name = validation_drivers[i]
driver_columns = data[data[:,0] == driver_name]
for j in range(10):
driver_class = 'c' + str(j)
class_columns = driver_columns[driver_columns[:,1] == driver_class]
for filename in class_columns[:,2]:
src = os.path.join(processed_path, 'train', driver_class, filename)
dest = os.path.join(processed_path, 'valid', driver_class, filename)
shutil.move(src, dest)
def add_conv_block(model, layers, filters):
for i in range(layers):
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(filters, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
return model
def add_fc_block(model, dropout):
model.add(Dense(4096, activation='relu'))
model.add(Dropout(dropout))
return model
class vgg16():
def __init__(self, dropout=0.5):
self.vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([3,1,1])
self.create(dropout)
def create(self, dropout):
def vgg_preprocess(x, mean):
mean = np.array(mean)
x = x - mean
return x[:,:,::-1]
model = self.model = Sequential()
model.add(Lambda(vgg_preprocess,
input_shape=(3, 244, 244),
output_shape=(3, 244, 244),
arguments = {'mean': self.vgg_mean.tolist()}
))
model = add_conv_block(model, 2, 64)
model = add_conv_block(model, 2, 128)
model = add_conv_block(model, 3, 256)
model = add_conv_block(model, 3, 512)
model = add_conv_block(model, 3, 512)
model.add(Flatten())
model = add_fc_block(model, dropout)
model = add_fc_block(model, dropout)
model.add(Dense(1000, activation='softmax'))
        model.load_weights(get_file(model_name, model_url+model_name, cache_subdir=cache_dir))
DEBUG = True
data_dir = os.path.join(os.getcwd(), os.pardir, 'data')
model_dir = os.path.join(os.getcwd(), os.pardir, 'models')
if DEBUG == True:
path = os.path.join(data_dir, 'processed', 'sample')
batch_size = 4
epochs = 2
elif DEBUG == False:
path = os.path.join(data_dir, 'processed')
batch_size = 64
epochs = 5
train_path = os.path.join(path, 'train')
val_path = os.path.join(path, 'valid')
train_batches = ImageDataGenerator().flow_from_directory(train_path,
target_size=(244,244),
batch_size=batch_size,
shuffle=True)
val_batches = ImageDataGenerator().flow_from_directory(val_path,
target_size=(244,244),
batch_size=batch_size,
shuffle=True)
lr = 0.001
model = vgg16(dropout=0.5).model
model.pop()
for layer in model.layers: layer.trainable=False
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=Adam(lr), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_batches,
samples_per_epoch=train_batches.nb_sample,
nb_epoch=epochs,
validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
model.save(os.path.join(model_dir, 'model_with_new_top.h5'))
old_model = load_model(os.path.join(os.getcwd(),
os.pardir,
'models',
'model_with_new_top.h5'))
flatten_index = [index for index,layer in enumerate(old_model.layers) if type(layer).__name__ == 'Flatten'][0]
conv_model_layers = old_model.layers[1:flatten_index-1]
conv_model = Sequential(conv_model_layers)
def fc_model(dropout):
model = Sequential()
model.add(MaxPooling2D(input_shape=conv_model.layers[-1].output_shape[1:]))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(Dense(10, activation='softmax'))
return model
DEBUG = False
data_dir = os.path.join(os.getcwd(), os.pardir, 'data')
model_dir = os.path.join(os.getcwd(), os.pardir, 'models')
test_path = os.path.join(path, 'test')
if DEBUG == True:
path = os.path.join(data_dir, 'processed', 'sample')
batch_size = 4
epochs = 2
elif DEBUG == False:
path = os.path.join(data_dir, 'processed')
batch_size = 64
epochs = 5
train_path = os.path.join(path, 'train')
val_path = os.path.join(path, 'valid')
train_image_gen = ImageDataGenerator(rotation_range=15,
height_shift_range=0.05,
width_shift_range=0.1,
shear_range = 0.1,
channel_shift_range=20,
)
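# Added note (editor's sketch): only mild augmentations are used here - small
# shifts and shear, a 15 degree rotation range and no horizontal flips - which
# suits photos taken from a fixed in-car camera position.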
aug_train_batches = train_image_gen.flow_from_directory(train_path,
target_size=(244,244),
batch_size=batch_size,
class_mode='categorical',
shuffle=False)
train_batches = ImageDataGenerator().flow_from_directory(train_path,
target_size=(244,244),
batch_size=batch_size,
class_mode='categorical',
shuffle=False)
val_batches = ImageDataGenerator().flow_from_directory(val_path,
target_size=(244,244),
batch_size=batch_size,
shuffle=False)
print 'Predicting, this may take a while...'
conv_model_predictions_augmented = conv_model.predict_generator(aug_train_batches,
aug_train_batches.nb_sample*2,
)
conv_model_predictions = conv_model.predict_generator(train_batches,
train_batches.nb_sample,
)
val_predictions = conv_model.predict_generator(val_batches,
val_batches.nb_sample,
)
print 'Done predicting!'
# Concatenating augmented and non-augmented predictions
conv_model_predictions = np.concatenate([conv_model_predictions_augmented, conv_model_predictions])
prediction_labels = to_categorical(train_batches.classes)
prediction_labels = np.concatenate([prediction_labels]*3)
test_path = os.path.join(path, 'test')
test_generator = ImageDataGenerator().flow_from_directory(test_path,
target_size=(244,244),
batch_size=batch_size,
class_mode='categorical',
shuffle=False)
print 'Predicting test features, this might take a while...'
conv_model_test_inputs = conv_model.predict_generator(test_generator,
test_generator.nb_sample
)
print 'Done predicting!'
def save_array(location, array):
    instance = bcolz.carray(array, rootdir=location, mode='w')
    instance.flush()
def load_array(location):
    return bcolz.open(location)[:]
save_array(os.path.join(model_dir, 'test_inputs.bc'), conv_model_test_inputs)
save_array(os.path.join(model_dir, 'conv_predictions.bc'), conv_model_predictions)
save_array(os.path.join(model_dir, 'conv_labels.bc'), prediction_labels)
save_array(os.path.join(model_dir, 'val_predictions.bc'), val_predictions)
save_array(os.path.join(model_dir, 'val_labels.bc'), to_categorical(val_batches.classes))
conv_predictions = load_array(os.path.join(model_dir, 'conv_predictions.bc'))
conv_labels = load_array(os.path.join(model_dir, 'conv_labels.bc'))
conv_val_predictions = load_array(os.path.join(model_dir, 'val_predictions.bc'))
conv_val_labels = load_array(os.path.join(model_dir, 'val_labels.bc'))
dropout = 0.8
model = fc_model(dropout)
epochs = 10
lr = 0.0001
model.compile(optimizer=Adam(lr),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.optimizer.lr.set_value(lr)
model.fit(conv_predictions,
conv_labels,
batch_size=batch_size,
nb_epoch=epochs,
validation_data=(conv_val_predictions, conv_val_labels))
lr = 0.00001
epochs = 2
model.optimizer.lr.set_value(lr)
model.fit(conv_predictions,
conv_labels,
batch_size=batch_size,
nb_epoch=epochs,
validation_data=(conv_val_predictions, conv_val_labels))
model.save_weights(os.path.join(model_dir, 'final_predictor.h5'))
dropout = 0.8
model = fc_model(dropout)
lr = 0.0001
model.compile(optimizer=Adam(lr),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.optimizer.lr.set_value(lr)
model.load_weights(os.path.join(model_dir, 'final_predictor.h5'))
test_input = load_array(os.path.join(model_dir, 'test_inputs.bc'))
test_predictions = model.predict(test_input)
test_predictions[1:3,:]
clipped_predictions = np.clip(test_predictions, 0.02, 0.98)
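# Added note (editor's sketch): clipping the predicted probabilities away from
# 0 and 1 hedges against the log-loss metric punishing confident mistakes:
# -np.log(1e-15) is ~34.5 per image, while -np.log(0.02) is only ~3.9.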
filename_list = [filename for filename in os.listdir(os.path.join(test_path, 'unknown'))]
filename_array = np.transpose(np.array(filename_list, ndmin=2))
csv_headless = np.concatenate([filename_array, clipped_predictions], axis=1)
header_list = [
'img',
'c0',
'c1',
'c2',
'c3',
'c4',
'c5',
'c6',
'c7',
'c8',
'c9',
]
header_line = np.array(header_list, ndmin=2)
ans_array = np.concatenate([header_line, csv_headless])
# ans_array = ans_array.astype('|S10')
np.savetxt(os.path.join(data_dir, "submission.csv"), ans_array, delimiter=',', fmt='%s')
data_dir
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introducing TinyImageNet
Step2: TinyImageNet-100-A classes
Step3: Visualize Examples
Step4: Pretrained model
Step5: Pretrained model performance
Step7: Saliency Maps
Step8: Once you have completed the implementation in the cell above, run the following to visualize some class saliency maps on the validation set of TinyImageNet-100-A.
Step10: Fooling Images
Step11: Run the following to choose a random validation set image that is correctly classified by the network, and then make a fooling image.
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time, os, json
import numpy as np
import skimage.io
import matplotlib.pyplot as plt
from cs231n.classifiers.pretrained_cnn import PretrainedCNN
from cs231n.data_utils import load_tiny_imagenet
from cs231n.image_utils import blur_image, deprocess_image
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
data = load_tiny_imagenet('cs231n/datasets/tiny-imagenet-100-A', subtract_mean=True)
for i, names in enumerate(data['class_names']):
print i, ' '.join('"%s"' % name for name in names)
# Visualize some examples of the training data
classes_to_show = 7
examples_per_class = 5
class_idxs = np.random.choice(len(data['class_names']), size=classes_to_show, replace=False)
for i, class_idx in enumerate(class_idxs):
train_idxs, = np.nonzero(data['y_train'] == class_idx)
train_idxs = np.random.choice(train_idxs, size=examples_per_class, replace=False)
for j, train_idx in enumerate(train_idxs):
img = deprocess_image(data['X_train'][train_idx], data['mean_image'])
plt.subplot(examples_per_class, classes_to_show, 1 + i + classes_to_show * j)
if j == 0:
plt.title(data['class_names'][class_idx][0])
plt.imshow(img)
plt.gca().axis('off')
plt.show()
model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')
batch_size = 100
# Test the model on training data
mask = np.random.randint(data['X_train'].shape[0], size=batch_size)
X, y = data['X_train'][mask], data['y_train'][mask]
y_pred = model.loss(X).argmax(axis=1)
print 'Training accuracy: ', (y_pred == y).mean()
# Test the model on validation data
mask = np.random.randint(data['X_val'].shape[0], size=batch_size)
X, y = data['X_val'][mask], data['y_val'][mask]
y_pred = model.loss(X).argmax(axis=1)
print 'Validation accuracy: ', (y_pred == y).mean()
def compute_saliency_maps(X, y, model):
    """
    Compute a class saliency map using the model for images X and labels y.

    Input:
    - X: Input images, of shape (N, 3, H, W)
    - y: Labels for X, of shape (N,)
    - model: A PretrainedCNN that will be used to compute the saliency map.

    Returns:
    - saliency: An array of shape (N, H, W) giving the saliency maps for the
      input images.
    """
saliency = None
##############################################################################
# TODO: Implement this function. You should use the forward and backward #
# methods of the PretrainedCNN class, and compute gradients with respect to #
# the unnormalized class score of the ground-truth classes in y. #
##############################################################################
scores, cache = model.forward(X, mode='test')
dscores = np.zeros_like(scores)
for n,idxN in enumerate(y):
dscores[n, idxN ] = 1
dX, grads = model.backward(dscores, cache)
saliency = dX.max(axis=1)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return saliency
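# Added note (editor's sketch): many saliency-map write-ups take the absolute
# value of the gradient before reducing over channels, i.e.
#   saliency = np.abs(dX).max(axis=1)
# which highlights influential pixels regardless of the sign of their effect.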
def show_saliency_maps(mask):
mask = np.asarray(mask)
X = data['X_val'][mask]
y = data['y_val'][mask]
saliency = compute_saliency_maps(X, y, model)
for i in xrange(mask.size):
plt.subplot(2, mask.size, i + 1)
plt.imshow(deprocess_image(X[i], data['mean_image']))
plt.axis('off')
plt.title(data['class_names'][y[i]][0])
plt.subplot(2, mask.size, mask.size + i + 1)
plt.title(mask[i])
plt.imshow(saliency[i])
plt.axis('off')
plt.gcf().set_size_inches(10, 4)
plt.show()
# Show some random images
mask = np.random.randint(data['X_val'].shape[0], size=5)
show_saliency_maps(mask)
# These are some cherry-picked images that should give good results
show_saliency_maps([128, 3225, 2417, 1640, 4619])
from cs231n.layers import softmax_loss
def make_fooling_image(X, target_y, model):
    """
    Generate a fooling image that is close to X, but that the model classifies
    as target_y.

    Inputs:
    - X: Input image, of shape (1, 3, 64, 64)
    - target_y: An integer in the range [0, 100)
    - model: A PretrainedCNN

    Returns:
    - X_fooling: An image that is close to X, but that is classified as
      target_y by the model.
    """
X_fooling = X.copy()
##############################################################################
# TODO: Generate a fooling image X_fooling that the model will classify as #
# the class target_y. Use gradient ascent on the target class score, using #
# the model.forward method to compute scores and the model.backward method #
# to compute image gradients. #
# #
# HINT: For most examples, you should be able to generate a fooling image #
# in fewer than 100 iterations of gradient ascent. #
##############################################################################
eps = 0.25
for i in range(1000):
scores, cache = model.forward(X_fooling)
if scores[0].argmax() == target_y:
# You fool!
print 'You fool! Iterations: ',i
break
_, dscores = softmax_loss(scores, target_y)
dX, grads = model.backward(dscores, cache)
# Sign of the gradient
sign_dX = (dX > 0).astype(np.float32)
# adding an imperceptibly small vector whose elements are equal to
# the sign of the elements of the gradient of the cost function with
# respect to the input [https://arxiv.org/pdf/1412.6572v3.pdf]
X_fooling -= eps * sign_dX
##############################################################################
# END OF YOUR CODE #
##############################################################################
return X_fooling
# Find a correctly classified validation image
while True:
i = np.random.randint(data['X_val'].shape[0])
X = data['X_val'][i:i+1]
y = data['y_val'][i:i+1]
y_pred = model.loss(X)[0].argmax()
if y_pred == y: break
target_y = 67
X_fooling = make_fooling_image(X, target_y, model)
# Make sure that X_fooling is classified as y_target
scores = model.loss(X_fooling)
assert scores[0].argmax() == target_y, 'The network is not fooled!'
# Show original image, fooling image, and difference
plt.subplot(1, 3, 1)
plt.imshow(deprocess_image(X, data['mean_image']))
plt.axis('off')
plt.title(data['class_names'][y][0])
plt.subplot(1, 3, 2)
plt.imshow(deprocess_image(X_fooling, data['mean_image'], renorm=True))
plt.title(data['class_names'][target_y][0])
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title('Difference')
plt.imshow(deprocess_image(X - X_fooling, data['mean_image']))
plt.axis('off')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Effect on S(Q)
Step2: The two plots clearly show that optimization on a non-extrapolated S(Q) results in an artificially lower intensity of the first sharp diffraction peak, pointing to the need for extrapolation before a sensible data analysis.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import sys
import matplotlib.pyplot as plt
sys.path.insert(1, os.path.join(os.getcwd(), '../../'))
from glassure.core.calc import calculate_fr, calculate_sq, optimize_sq, calculate_gr
from glassure.core.utility import extrapolate_to_zero_poly, convert_density_to_atoms_per_cubic_angstrom
from glassure.core import Pattern
import numpy as np
data_pattern = Pattern.from_file('../tests/data/Mg2SiO4_ambient.xy')
bkg_pattern = Pattern.from_file('../tests/data/Mg2SiO4_ambient_bkg.xy')
sample_pattern = data_pattern - bkg_pattern
composition = {'Mg': 2, 'Si':1, 'O':4}
density = 2.9
atomic_density = convert_density_to_atoms_per_cubic_angstrom(composition, density)
sq = calculate_sq(sample_pattern.limit(0,20), density, composition)
sq_opt = optimize_sq(sq, 1.4, 10, atomic_density)
sq_extr= extrapolate_to_zero_poly(sq, 1.5, replace=True)
sq_extr_opt = optimize_sq(sq_extr, 1.4, 10, atomic_density)
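# Added note (editor's sketch): the comparison below is the point of the
# exercise - optimizing the raw S(Q) (sq_opt) versus optimizing an S(Q) that was
# first extrapolated towards Q=0 with a polynomial (sq_extr_opt).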
plt.figure(figsize=(12, 15))
plt.subplot(2,1,1)
plt.plot(*sq.data, label='raw')
plt.plot(*sq_opt.data, label='opt')
plt.plot(*sq_extr_opt.data, label='extra_opt')
plt.xlabel('Q $(\AA^{-1})$')
plt.ylabel('S(Q)')
plt.legend()
plt.subplot(2,1,2)
plt.plot(*sq.data, label='raw')
plt.plot(*sq_opt.data, label='opt')
plt.plot(*sq_extr_opt.data, label='extra_opt')
plt.xlabel('Q $(\AA^{-1})$')
plt.ylabel('S(Q)')
plt.xlim(0, 7)
plt.legend(loc='best')
fr = calculate_fr(sq, use_modification_fcn=True)
fr_extr = calculate_fr(sq_extr, use_modification_fcn=True)
fr_opt = calculate_fr(sq_opt, use_modification_fcn=True)
fr_extr_opt = calculate_fr(sq_extr_opt, use_modification_fcn=True)
gr = calculate_gr(fr, density, composition)
gr_extr = calculate_gr(fr_extr, density, composition)
gr_opt = calculate_gr(fr_opt, density, composition)
gr_extr_opt = calculate_gr(fr_extr_opt, density, composition)
plt.figure(figsize=(12,8))
plt.subplot(1, 2, 1)
plt.plot(*fr.data, label='raw', color='k', ls='-')
plt.plot(*fr_extr.data, label='raw_extr', color='r', ls='-')
plt.plot(*fr_opt.data, label='opt', color='k', ls='--')
plt.plot(*fr_extr_opt.data, label='extr_opt', color='r', ls='--')
plt.xlim(0,5)
plt.legend(loc='best')
plt.xlabel('r $(\AA)$')
plt.ylabel('F(r)')
plt.subplot(1, 2, 2)
plt.plot(*gr.data, label='raw', color='k', ls='-')
plt.plot(*gr_extr.data, label='raw_extr', color='r', ls='-')
plt.plot(*gr_opt.data, label='opt', color='k', ls='--')
plt.plot(*gr_extr_opt.data, label='extr_opt', color='r', ls='--')
plt.ylim(-0.2, 2)
plt.xlim(0, 5)
plt.legend(loc='best')
plt.xlabel('r $(\AA)$')
plt.ylabel('g(r)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Objective function
Step2: Optimisation using decision trees
Step3: Partial dependence plot
Step4: It is possible to change the location of the red dot, which normally shows the position of the found minimum.
Step5: Plot without partial dependence
Step6: Modify the shown minimum
Step7: "expected_minimum_random" is a naive way of finding the minimum of the
Step8: We can also specify how many initial samples are used for the two different ways of finding the minimum ("expected_minimum" and "expected_minimum_random").
Step9: Set a minimum location
|
<ASSISTANT_TASK:>
Python Code:
print(__doc__)
import sys
from skopt.plots import plot_objective
from skopt import forest_minimize
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
# Here we define a function that we evaluate.
def funny_func(x):
s = 0
for i in range(len(x)):
s += (x[i] * i) ** 2
return s
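# Added note (editor's sketch): because the i=0 term is multiplied by zero, the
# objective does not depend on the first dimension at all, so its partial
# dependence plots below should look essentially flat.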
bounds = [(-1, 1.), ] * 3
n_calls = 150
result = forest_minimize(funny_func, bounds, n_calls=n_calls,
base_estimator="ET",
random_state=4)
_ = plot_objective(result, n_points=10)
_ = plot_objective(result, n_points=10, minimum='expected_minimum')
_ = plot_objective(result, sample_source='result', n_points=10)
_ = plot_objective(result, n_points=10, sample_source='expected_minimum',
minimum='expected_minimum')
_ = plot_objective(result, n_points=10, sample_source='expected_minimum_random',
minimum='expected_minimum_random')
_ = plot_objective(result, n_points=10, sample_source='expected_minimum_random',
minimum='expected_minimum_random',
n_minimum_search=10)
_ = plot_objective(result, n_points=10, sample_source="expected_minimum",
minimum='expected_minimum', n_minimum_search=2)
_ = plot_objective(result, n_points=10, sample_source=[1, -0.5, 0.5],
minimum=[1, -0.5, 0.5])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Layers
Step2: Heatmap layer
Step3: Velocity
Step4: Controls
Step5: Clean
|
<ASSISTANT_TASK:>
Python Code:
from ipyleaflet import Map, basemaps, basemap_to_tiles
center = (52.204793, 360.121558)
m = Map(
layers=(basemap_to_tiles(basemaps.NASAGIBS.ModisTerraTrueColorCR, "2018-11-12"), ),
center=center,
zoom=4
)
m
from ipyleaflet import Marker, Icon
icon = Icon(icon_url='https://leafletjs.com/examples/custom-icons/leaf-red.png', icon_size=[38, 95], icon_anchor=[22,94])
mark = Marker(location=center, icon=icon, rotation_origin='22px 94px')
m.add_layer(mark)
import time
for _ in range(40):
mark.rotation_angle += 15
time.sleep(0.1)
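# Added note (editor's sketch): rotation_angle is a synced trait, so updating it
# in a loop animates the marker on the map above; the sleep only paces the animation.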
from ipywidgets import Button, IntSlider, link
from ipyleaflet import Heatmap
from random import gauss
import time
center = (37.09, -103.66)
zoom = 5
def create_random_data(length):
"Return a list of some random lat/lon/value triples."
return [[gauss(center[0], 2),
gauss(center[1], 4),
gauss(700, 300)] for i in range(length)]
m.center = center
m.zoom = zoom
heat = Heatmap(locations=create_random_data(1000), radius=20, blur=10)
m.add_layer(heat)
def generate(_):
heat.locations = create_random_data(1000)
button = Button(description='Generate data', button_style='success')
button.on_click(generate)
button
m
slider = IntSlider(min=10, max=30, value=heat.radius)
link((slider, 'value'), (heat, 'radius'))
slider
from ipyleaflet import Velocity
import xarray as xr
center = (0, 0)
zoom = 4
m2 = Map(center=center, zoom=zoom, interpolation='nearest', basemap=basemaps.CartoDB.DarkMatter)
m2
ds = xr.open_dataset('src/wind-global.nc')
display_options = {
'velocityType': 'Global Wind',
'displayPosition': 'bottomleft',
'displayEmptyString': 'No wind data'
}
wind = Velocity(data=ds,
zonal_speed='u_wind',
meridional_speed='v_wind',
latitude_dimension='lat',
longitude_dimension='lon',
velocity_scale=0.01,
max_velocity=20,
display_options=display_options)
m2.add_layer(wind)
from ipyleaflet import Map, basemaps, basemap_to_tiles, SplitMapControl
m = Map(center=(42.6824, 365.581), zoom=5)
right_layer = basemap_to_tiles(basemaps.NASAGIBS.ModisTerraTrueColorCR, "2017-11-11")
left_layer = basemap_to_tiles(basemaps.NASAGIBS.ModisAquaBands721CR, "2017-11-11")
control = SplitMapControl(left_layer=left_layer, right_layer=right_layer)
m.add_control(control)
m
from ipywidgets import Widget
Widget.close_all()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: record schedules for 2 weeks, then augment count with weekly flight numbers.
Step2: good dates
Step3: Save
|
<ASSISTANT_TASK:>
Python Code:
L=json.loads(file('../json/L.json','r').read())
M=json.loads(file('../json/M.json','r').read())
N=json.loads(file('../json/N.json','r').read())
import requests
AP={}
for c in M:
if c not in AP:AP[c]={}
for i in range(len(L[c])):
AP[c][N[c][i]]=L[c][i]
baseurl='https://www.airportia.com/'
import requests, urllib2
def urlgetter(url):
s = requests.Session()
cookiesopen = s.get(url)
cookies=str(s.cookies)
fcookies=[[k[:k.find('=')],k[k.find('=')+1:k.find(' for ')]] for k in cookies[cookies.find('Cookie '):].split('Cookie ')[1:]]
#push token
opener = urllib2.build_opener()
for k in fcookies:
opener.addheaders.append(('Cookie', k[0]+'='+k[1]))
#read html
return s.get(url).content
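# Added note (editor's sketch): the urllib2 opener built above is never actually
# used - the function returns the body of a second requests GET on the same
# session, which already carries the cookies obtained by the first request.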
SD={}
SC=json.loads(file('../json/SC2.json','r').read())
#pop out last - if applicable
try: SD.pop(c)
except: pass
for h in range(len(AP.keys())):
c=AP.keys()[h]
#country not parsed yet
if c in SC:
if c not in SD:
SD[c]=[]
print h,c
airportialinks=AP[c]
sch={}
#all airports of country, where there is traffic
for i in airportialinks:
if i in SC[c]:
print i,
if i not in sch:sch[i]={}
url=baseurl+airportialinks[i]
m=urlgetter(url)
for d in range (3,17):
#date not parsed yet
if d not in sch[i]:
url=baseurl+airportialinks[i]+'departures/201704'+str(d)
m=urlgetter(url)
soup = BeautifulSoup(m, "lxml")
#if there are flights at all
if len(soup.findAll('table'))>0:
sch[i][d]=pd.read_html(m)[0]
else: print '--W-',d,
SD[c]=sch
print
dbpath='E:/Dropbox/Public/datarepo/aviation/' #large file db path
file(dbpath+"json/SD_dest.json",'w').write(repr(SD))
cnc_path='../../universal/countries/'
cnc=pd.read_excel(cnc_path+'cnc.xlsx').set_index('Name')
MDF=pd.DataFrame()
for c in SD:
sch=SD[c]
mdf=pd.DataFrame()
for i in sch:
for d in sch[i]:
df=sch[i][d].drop(sch[i][d].columns[3:],axis=1).drop(sch[i][d].columns[0],axis=1)
df['From']=i
df['Date']=d
mdf=pd.concat([mdf,df])
mdf=mdf.replace('Hahn','Frankfurt')
mdf=mdf.replace('Hahn HHN','Frankfurt HHN')
if len(sch)>0:
mdf['City']=[i[:i.rfind(' ')] for i in mdf['To']]
mdf['Airport']=[i[i.rfind(' ')+1:] for i in mdf['To']]
cpath=str(cnc.T.loc[c]['ISO2']).lower()
if cpath=='nan':cpath='na'
file('../countries/'+cpath+"/json/mdf_dest.json",'w').write(json.dumps(mdf.reset_index().to_json()))
MDF=pd.concat([MDF,mdf])
print c,
dbpath='E:/Dropbox/Public/datarepo/aviation/' #large file db path
MDF.reset_index().to_json(dbpath+'json/MDF_dest.json')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cccr-iitm', 'sandbox-3', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
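# Illustrative sketch only (hypothetical selections, NOT the model's actual answers):
# a multi-valued property like this one is recorded by calling DOC.set_value once per
# chosen item, following the "Set as follows" pattern above, e.g.
#     DOC.set_value("water")
#     DOC.set_value("energy")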
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sommer 2015
Step2: Why doesn't a join work?
Step3: The join-based approach does not work in this form because, by the second join at the latest, the company Trappo is matched against two rows from the first join. As a result, the number of trips is doubled as well. The same thing happens again with the third join.
Step4: Winter 2015
Step5: ``mysql
Step6: It can also be done with a subselect
Step7: Versicherung (insurance database)
Step8: Solution
|
<ASSISTANT_TASK:>
Python Code:
%load_ext sql
%sql mysql://steinam:steinam@localhost/sommer_2015
%sql select count(*) as AnzahlFahrten from fahrten
%%sql
select k.kd_id, k.`kd_firma`, k.`kd_plz`,
    count(a.Au_ID) as AnzAuftrag,
    count(f.f_id) as AnzFahrt,
    sum(ts.ts_strecke) as SumStrecke
    from kunde k left join auftrag a on k.`kd_id` = a.`au_kd_id`
    left join fahrten f on a.`au_id` = f.`f_au_id`
    left join teilstrecke ts on ts.`ts_f_id` = f.`f_id`
    group by k.kd_id order by k.`kd_plz`
%sql select k.kd_id, k.`kd_firma`, k.`kd_plz`, a.`au_id` from kunde k left join auftrag a on k.`kd_id` = a.`au_kd_id` left join fahrten f on a.`au_id` = f.`f_au_id` left join teilstrecke ts on ts.`ts_f_id` = f.`f_id` order by k.`kd_plz`
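%%sql
-- A hedged sketch (not part of the original exam solution): one way around the
-- join fan-out described above is to compute every aggregate in its own correlated
-- subquery, so no customer row is multiplied before counting or summing.
select k.kd_id, k.`kd_firma`, k.`kd_plz`,
  (select count(*) from auftrag a
     where a.`au_kd_id` = k.`kd_id`) as AnzAuftrag,
  (select count(*) from fahrten f
     join auftrag a2 on f.`f_au_id` = a2.`au_id`
     where a2.`au_kd_id` = k.`kd_id`) as AnzFahrt,
  (select coalesce(sum(ts.`ts_strecke`), 0) from teilstrecke ts
     join fahrten f2 on ts.`ts_f_id` = f2.`f_id`
     join auftrag a3 on f2.`f_au_id` = a3.`au_id`
     where a3.`au_kd_id` = k.`kd_id`) as SumStrecke
from kunde k
order by k.`kd_plz`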
%sql mysql://steinam:steinam@localhost/winter_2015
%%sql
select count(rechnung.`Rg_ID`), kunde.`Kd_Name` from rechnung
inner join kunde on `rechnung`.`Rg_KD_ID` = kunde.`Kd_ID`
inner join `zahlungsbedingung` on kunde.`Kd_Zb_ID` = `zahlungsbedingung`.`Zb_ID`
where `zahlungsbedingung`.`Zb_SkontoProzent` > 3.0
and year(`rechnung`.`Rg_Datum`) = 2015 group by Kunde.`Kd_Name`
%%sql
select kd.`Kd_Name`,
(select COUNT(*) from Rechnung as R
where R.`Rg_KD_ID` = KD.`Kd_ID` and year(R.`Rg_Datum`) = 2015) as Anzahl
from Kunde kd inner join `zahlungsbedingung`
on kd.`Kd_Zb_ID` = `zahlungsbedingung`.`Zb_ID`
and `zahlungsbedingung`.`Zb_SkontoProzent` > 3.0
%sql -- your code goes here
%sql mysql://steinam:steinam@localhost/versicherung_complete
%%sql
select min(`vv`.`Abschlussdatum`) as 'Erster Abschluss', `vv`.`Mitarbeiter_ID`
from `versicherungsvertrag` vv inner join mitarbeiter m
on vv.`Mitarbeiter_ID` = m.`ID`
where vv.`Mitarbeiter_ID` in ( select m.`ID` from mitarbeiter m
inner join Abteilung a
on m.`Abteilung_ID` = a.`ID`)
group by vv.`Mitarbeiter_ID`
result = _
result
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TL;DR Interpolated average precision is a common metric for classification tasks. However, interpolating linearly between operating points, as in scikit-learn's implementation, systematically rewards models that assign few discrete scores when there are more negative than positive examples. We propose a step-wise interpolation strategy that addresses this problem and reflects the literature, and we include code to compute average precision scores using this interpolation.
Step2: Precision and recall
Step3: However, this ignores the recall values associated with each threshold. To see why we should care, let's invent a new dataset consisting of 5 positive examples and 3 negative examples, and two models which assign scores as follows
Step4: Notice that there are always two operating points that are independent of our scores
Step5: Good classifiers will have both high precision and high recall, so we want to see operating points near the top-right of the grid. It's natural, then, to interpret our goal as to maximize the area under a precision–recall curve defined by all of the operating points
Step6: This suggests that there's a way to get a precision ≈ 0.8 with a recall of 0.2. But consider how we usually think about interpolation
Step7: For every value of recall greater than 0, the precision is exactly 0.1. Rather than interpolating linearly between adjacent operating points, we instead use a step function consisting of horizontal and vertical lines. Notice that it no longer matters how we define the precision when recall is exactly 0 since there is no area under that point.
Step8: If we continue adding observations where the positive and negative data are in the same ratio, the operating points get closer and closer to the horizontal line above
Step9: Scikit-learn
Step10: If we trusted this measure, we'd conclude that the do-nothing model was better than a model that can achieve a true positive rate of 10% at the expense of a false positive rate of only 0.1%. With uneven datasets like the one we're working on here (10% positives), it's incredibly difficult to add an operating point above the line joining the end points.
Step11: Now the second model is preferred to the first. (We should note that the area is still very low
Step12: Now let's invent a second model which simply rounds the first model's scores to the nearest tenth. So a score of 0.5321 is mapped to 0.5. We can use linear interpolation to compare the interpolated average precision for the two models
Step13: According to this metric, we should prefer the second model. It's hard to imagine a scenario where throwing away detail should improve a reasonably-good model, and so we should be skeptical about the efficacy of linear interpolation.
|
<ASSISTANT_TASK:>
Python Code:
__author__ = 'Nick Dingwall'
from average_precision_post_code import *
precision_scores = np.mean(
[1.00, 1.00, 1.00, 0.67, 0.75, 0.60,
0.67, 0.71, 0.62, 0.56, 0.50])
print("Mean precision: {:4.4f}".format(precision_scores))
%matplotlib inline
ranked_predictions = [1,1,0,1,0,1,1,0,0,0]
p, r = operating_points(ranked_predictions)
plot_recall_precision(p, r)
obs, constant_preds = generate_data_and_constant_predictions(
n=100, frac_positive=0.1)
plot_recall_precision_from_predictions(obs, constant_preds)
plot_recall_precision_from_predictions(
obs, constant_preds,
interpolation='linear',
title='Linear interpolation')
plot_recall_precision_from_predictions(
obs, constant_preds,
interpolation='step',
title='Step-wise interpolation')
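# A minimal sketch (not taken from the post's helper module) of how the area under the
# step-wise curve can be computed directly from scores: AP is the sum over the ranking
# of (recall increment) * (precision at that cut-off). The function name is just an
# illustrative choice, and it assumes at least one positive example.
import numpy as np

def stepwise_average_precision(y_true, scores):
    order = np.argsort(scores)[::-1]              # rank examples by descending score
    y = np.asarray(y_true)[order]
    tp = np.cumsum(y)                             # true positives at each cut-off
    precision = tp / np.arange(1.0, len(y) + 1.0)
    recall = tp / float(tp[-1])
    recall_increments = np.diff(np.concatenate(([0.0], recall)))
    return float(np.sum(recall_increments * precision))

# sanity check on the small ranked example above, using scores that reproduce its order
print(stepwise_average_precision(ranked_predictions, -np.arange(len(ranked_predictions))))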
some_noise = np.random.normal(
loc=0, scale=0.1, size=len(constant_preds))
noisy_preds = constant_preds + some_noise
plot_recall_precision_from_predictions(
obs, noisy_preds)
many_obs, many_constant_preds = generate_data_and_constant_predictions(
n=100000, frac_positive=0.1)
much_noise = np.random.normal(
loc=0, scale=0.1, size=len(many_constant_preds))
noisy_preds = many_constant_preds + much_noise
plot_recall_precision_from_predictions(many_obs, noisy_preds)
better_preds = copy(constant_preds)
better_preds[0] = 0.8
better_preds[-1] = 0.8
compare_recall_precisions_from_predictions(
obs,
OrderedDict([['Constant prediction', constant_preds],
['Improved prediction', better_preds]]),
interpolation='linear',
title='Linear interpolation')
compare_recall_precisions_from_predictions(
obs,
OrderedDict([['Constant prediction', constant_preds],
['Improved prediction', better_preds]]),
interpolation='step',
title='Step-wise interpolation')
y, scores, roc_auc = train_model_and_evaluate(
n_dim=50, n_samples=5000, frac_positive=0.05, mixing_factor=0.02)
print("Model achieves ROC AUC of {:4.4f}".format(roc_auc))
compare_recall_precisions_from_predictions(
y, OrderedDict([['Unrounded', scores],
['Rounded', np.round(scores, 1)]]),
interpolation='linear',
title='Linear interpolation')
compare_recall_precisions_from_predictions(
y, OrderedDict([['Unrounded', scores],
['Rounded', np.round(scores, 1)]]),
interpolation='step',
title='Step-wise interpolation')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The next step is to create a new column in our dataset that contains tokenized words with all the pre-processing steps.
Step2: Pre-processing is done. What other pre-processing steps might we use?
Step3: Counts
Step4: Great! You know what to do now.
Step5: That's the dictionary method! You can do this with any dictionary you want, whether a standard one or one you create yourself.
Step6: Now we can keep only those columns that occur in our positive words list. To do this, we'll first save a list of the column names as a variable, and then keep only the elements of that list that occur in our positive words list. We'll then create a new dataframe keeping only those selected columns.
|
<ASSISTANT_TASK:>
Python Code:
#import the necessary packages
import pandas
import nltk
from nltk import word_tokenize
import string
#read the Music Reviews corpus into a Pandas dataframe
df = pandas.read_csv("../Data/BDHSI2016_music_reviews.csv", encoding='utf-8', sep = '\t')
#view the dataframe
df
#first create a new column called "body_tokens" and transform to lowercase by applying the string function str.lower()
df['body'] = df['body'].apply(lambda x: ''.join([i for i in x if not i.isdigit()]))
df['body_tokens'] = df['body'].str.lower()
#tokenize
df['body_tokens'] = df['body_tokens'].apply(nltk.word_tokenize)
#view output
print(df['body_tokens'])
punctuations = list(string.punctuation)
#remove punctuation. Let's talk about that lambda x.
df['body_tokens'] = df['body_tokens'].apply(lambda x: [word for word in x if word not in punctuations])
#view output
print(df['body_tokens'])
df['token_count'] = df['body_tokens'].apply(lambda x: len(x))
print(df[['body_tokens','token_count']])
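# One possible extra pre-processing step (a hedged sketch, not required by the exercise):
# removing English stopwords with NLTK's built-in list. The column name
# 'body_tokens_nostop' is only an illustrative choice, and the list may need a one-time
# nltk.download('stopwords').
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
df['body_tokens_nostop'] = df['body_tokens'].apply(lambda x: [w for w in x if w not in stop_words])
print(df[['body_tokens_nostop', 'token_count']].head())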
pos_sent = open("../Data/positive_words.txt", encoding='utf-8').read()
neg_sent = open("../Data/negative_words.txt", encoding='utf-8').read()
#view part of the pos_sent variable, to see how it's formatted.
print(pos_sent[:101])
#remember the split function? We'll split on the newline character (\n) to create a list
positive_words=pos_sent.split('\n')
negative_words=neg_sent.split('\n')
#view the first elements in the lists
print(positive_words[:10])
print(negative_words[:10])
positive_words
#count number of words in each list
print(len(positive_words))
print(len(negative_words))
#exercise code here
#import the function CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
countvec = CountVectorizer()
#create our document term matrix as a pandas dataframe
dtm_df = pandas.DataFrame(countvec.fit_transform(df.body).toarray(), columns=countvec.get_feature_names(), index = df.index)
dtm_df
#create a columns variable that is a list of all column names
columns = list(dtm_df)
columns
#create a new variable that contains only column names that are in our postive words list
pos_columns = [word for word in columns if word in positive_words]
pos_columns
#create a dtm from our dtm_df that keeps only positive sentiment columns
dtm_pos = dtm_df[pos_columns]
dtm_pos
#count the number of positive words for each document
dtm_pos['pos_count'] = dtm_pos.sum(axis=1)
#dtm_pos.drop('pos_count',axis=1, inplace=True)
dtm_pos['pos_count']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Shyft Environment
Step2: 2. Configuration of a SHyFT calibration
Step3: Now that we have the initial state, we'll run the calibration (this is not a strictly required step, but we use it later)
Step4: 3. Running a SHyFT calibration
Step5: 4. Inspecting the calibration results
Step6: Plotting simulated and observed discharge
Step7: 5. Changing parameters on-the-fly
Step8: In the following, we first set the gs.tx parameter to a higher and then to a lower value than the one suggested by the calibration results. We re-run the simulation for each value and plot the results.
Step9: 6. Play with some sensitive parameters in real time
|
<ASSISTANT_TASK:>
Python Code:
# Pure python modules and jupyter notebook functionality
# first you should import the third-party python modules which you'll use later on
# the first line makes sure that figures are shown inline, directly in the notebook
%pylab inline
import os
import datetime as dt
import pandas as pd
from os import path
import sys
from matplotlib import pyplot as plt
# try to auto-configure the path, -will work in all cases where doc and data
# are checked out at same level
shyft_data_path = path.abspath("../../../shyft-data")
if path.exists(shyft_data_path) and 'SHYFT_DATA' not in os.environ:
os.environ['SHYFT_DATA']=shyft_data_path
# shyft should be available either by its install in python
# or by PYTHONPATH set by user prior to starting notebook.
# This is equivalent to the two lines below
# shyft_path=path.abspath('../../../shyft')
# sys.path.insert(0,shyft_path)
# importing the shyft modules needed for running a calibration
from shyft.repository.default_state_repository import DefaultStateRepository
from shyft.orchestration.configuration.yaml_configs import YAMLCalibConfig, YAMLSimConfig
from shyft.orchestration.simulators.config_simulator import ConfigCalibrator, ConfigSimulator
# conduct a configured simulation first.
config_file_path = os.path.abspath("../nea-example/nea-config/neanidelva_simulation.yaml")
cfg = YAMLSimConfig(config_file_path, "neanidelva")
simulator = ConfigSimulator(cfg)
# run the model, and we'll just pull the `api.model` from the `simulator`
simulator.run()
state = simulator.region_model.state
# set up configuration using *.yaml configuration files
config_file_path = os.path.abspath("./nea-config/neanidelva_calibration.yaml") # here is the *.yaml file
cfg = YAMLCalibConfig(config_file_path, "neanidelva")
# initialize an instance of the orchestration's ConfigCalibrator class, which has all the functionality needed
# to run a calibration using the above initiated configuration
calib = ConfigCalibrator(cfg)
n_cells = calib.region_model.size()
state_repos = DefaultStateRepository(calib.region_model) # Notice that this repository needs the real model
# so that it's able to generate a precise
# default state-with-id vector for this
# specific model
# once the calibrator is set up, all you need to do is run the calibration...
# the calibrated parameters are stored in a model.yaml.
results = calib.calibrate(cfg.sim_config.time_axis, state_repos.get_state(0).state_vector,
cfg.optimization_method['name'],
cfg.optimization_method['params'])
# Get NSE of calibrated run:
result_params = []
for i in range(results.size()):
result_params.append(results.get(i))
print("Final NSE =", 1-calib.optimizer.calculate_goal_function(result_params))
# Check out the calibrated parameters.
diff = 1.0E-3
print("{0:30s} {1:10s}".format("PARAM-NAME", "CALIB-VALUE"))
for i in range(results.size()):
print("{0:30s} {1:10f}".format(results.get_name(i), results.get(i)))
# get the target vector and discharge statistics from the configured calibrator
target_obs = calib.tv[0]
disch_sim = calib.region_model.statistics.discharge(target_obs.catchment_indexes).average(target_obs.ts.time_axis)
disch_obs = target_obs.ts.values
ts_timestamps = [dt.datetime.utcfromtimestamp(p.start) for p in target_obs.ts.time_axis]
# plot up the results
fig, ax = plt.subplots(1, figsize=(15,10))
ax.plot(ts_timestamps, disch_sim.values, lw=2, label = "sim")
ax.plot(ts_timestamps, disch_obs, lw=2, ls='--', label = "obs")
ax.set_title("observed and simulated discharge")
ax.legend()
ax.set_ylabel("discharge [m3 s-1]")
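# A hedged cross-check (not in the original notebook): recompute the Nash-Sutcliffe
# efficiency directly from the two series plotted above, assuming they are aligned on
# the same target time axis and convert cleanly to numpy arrays.
import numpy as np
sim = np.asarray(disch_sim.values)
obs = np.asarray(disch_obs)
nse = 1.0 - np.sum((sim - obs) ** 2) / np.sum((obs - obs.mean()) ** 2)
print("NSE recomputed from the plotted series: {:.4f}".format(nse))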
parameters = calib.region_model.get_region_parameter() # fetching parameters from the simulator object
print(u"Calibrated rain/snow threshold temp: {} C".format(parameters.gs.tx)) # print current value of hs.tx
calib.optimizer.calculate_goal_function(result_params) # reset the parameters to the values of the calibration
parameters.gs.tx = 4.0 # setting a higher value for tx
s_init = state.extract_state([])
# type(state)
# s0=state_repos.get_state(0)
# s0.state_vector
# state.apply_state(s0, [])
calib.run(state=s_init) # rerun the model, with new parameter
disch_sim_p_high = calib.region_model.statistics.discharge(target_obs.catchment_indexes).average(target_obs.ts.time_axis) # fetch discharge ts
parameters.gs.tx = -4.0 # setting a lower value for tx
calib.run(state=s_init) # rerun the model, with new parameter
disch_sim_p_low = calib.region_model.statistics.discharge(target_obs.catchment_indexes).average(target_obs.ts.time_axis) # fetch discharge ts
fig, ax = plt.subplots(1, figsize=(15,10))
ax.plot(ts_timestamps, disch_sim.values, lw=2, label = "calib")
ax.plot(ts_timestamps, disch_sim_p_high.values, lw=2, label = "high")
ax.plot(ts_timestamps, disch_sim_p_low.values, lw=2, label = "low")
ax.plot(ts_timestamps, disch_obs, lw=2, ls='--', label = "obs")
ax.set_title("investigating parameter gs.tx")
ax.legend()
ax.set_ylabel("discharge [m3 s-1]")
s_init = state.extract_state([])
# reset the max water parameter
parameters.gs.max_water = 1.0 # setting a higher value for max_water
calib.run(state=s_init) # rerun the model, with new parameter
disch_sim_p_high = calib.region_model.statistics.discharge(target_obs.catchment_indexes).average(target_obs.ts.time_axis) # fetch discharge ts
parameters.gs.max_water = .001 # setting a lower value for max_water
calib.run(state=s_init) # rerun the model, with new parameter
disch_sim_p_low = calib.region_model.statistics.discharge(target_obs.catchment_indexes).average(target_obs.ts.time_axis) # fetch discharge ts
# plot the results
fig, ax = plt.subplots(1, figsize=(15,10))
ax.plot(ts_timestamps, disch_sim.values, lw=2, label = "calib")
ax.plot(ts_timestamps, disch_sim_p_high.values, lw=2, label = "high")
ax.plot(ts_timestamps, disch_sim_p_low.values, lw=2, label = "low")
ax.plot(ts_timestamps, disch_obs, lw=2, ls='--', label = "obs")
ax.set_title("investigating parameter gs.max_water")
ax.legend()
ax.set_ylabel("discharge [m3 s-1]")
# at this point we could look at the time series for every cell. Or plot a spatial map...
# TODO: https://data-dive.com/cologne-bike-rentals-interactive-map-bokeh-dynamic-choropleth
from ipywidgets import interact
import numpy as np
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
from bokeh.palettes import viridis
from bokeh.models.sources import ColumnDataSource
output_notebook()
p = figure(title='Parameters', plot_height=300, plot_width=800)
pallette = viridis(10)
ts_timestamps = [dt.datetime.utcfromtimestamp(ta.start) for ta in target_obs.ts.time_axis]
def plot_simobs(calib):
model = calib.region_model
disch_sim = model.statistics.discharge(calib.tv[0].catchment_indexes).average(calib.tv[0].ts.time_axis)
disch_obs = calib.tv[0].ts
data = {
'time': ts_timestamps,
'sim': model.statistics.discharge(calib.tv[0].catchment_indexes).average(calib.tv[0].ts.time_axis).values,
'obs': calib.tv[0].ts.values
}
source = ColumnDataSource(data)
p.line('time', 'sim', source=source, line_color=pallette[0])
p.line('time', 'obs', source=source, line_color='red')
return p
def update(tx=0):
parameters.gs.tx = tx
calib.run(state=s_init)
plot_simobs(calib)
push_notebook()
model = calib.region_model
p = plot_simobs(calib)
show(p, notebook_handle=True)
interact(update, tx=np.arange(-3.,4.))
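# A hedged variation on the slider above (the value range for gs.max_water is only an
# illustrative guess): expose a second parameter so both can be explored together.
def update_two(tx=0.0, max_water=0.5):
    parameters.gs.tx = tx
    parameters.gs.max_water = max_water
    calib.run(state=s_init)
    plot_simobs(calib)
    push_notebook()

interact(update_two, tx=np.arange(-3., 4.), max_water=np.arange(0.1, 1.1, 0.2))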
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, let's find out whether there are any clusters (connected components)
Step2: Visualization
Step3: Power Law Property
Step4: Directed Graphs
|
<ASSISTANT_TASK:>
Python Code:
import copy
# open the file you have downloaded
# these files are organized
file = open("amazon.txt")
# this returns a list with one entry for each line in the file
lines = file.readlines()
print len(lines)
# Note: in the edge-list files used here, each line describes a single edge:
# a node id and the id of a node it links to, separated by a tab.
# construct the graph
# a set is an unordered collection of unique elements
edges = set()
# this will store our nodes
nodes = {}
# for each line in the file that was loaded in
for line in lines:
    # split the line into the two node ids that make up this edge
data = line.split()
a = int(data[0])
b = int(data[1])
# add the edge
edges.add((a, b))
# update the count for the number of times we've seen each node
nodes[a] = nodes.get(a, -1) + 1
nodes[b] = nodes.get(b, -1) + 1
print "number of unique edges"
print len(edges)
print "number of unique nodes"
print len(nodes)
# get the degrees of each node in a set of edges
def get_degrees(edges):
degree_counts={}
# for each pair of nodes (edge)
for i,j in edges:
# increment the count for the number of edges connected
# to each node by one
degree_counts[i] = degree_counts.get(i, 0) + 1
degree_counts[j] = degree_counts.get(j, 0) + 1
return degree_counts
# Delete all nodes in delete_nodes from edges
def delete_node(edges, delete_nodes):
# construct a new set of edges
new_edges = []
print "# of nodes to be deleted", len(delete_nodes)
# loop through all the current edges
for i, j in edges:
        # if an edge's two nodes are not in the
# set of nodes to be deleted
if i not in delete_nodes and j not in delete_nodes:
# append that edge to our new edges
new_edges.append((i,j))
return new_edges
# kcore algorithm
# We run the kcore algorithm to delete all
# the nodes whose cores are less than k
# returns a new set of edges and nodes
# including only those in the k core.
def kcore(edges, k):
# make a complete copy of the edges so we can delete or change
# things without messing up our original
edges = copy.deepcopy(edges)
    # now count the degree (number of incident edges) of each node
degree_counts = get_degrees(edges)
# sort the nodes by degree and return
# only the node numbers (not their degree)
sorted_nodes = sorted(degree_counts, key = degree_counts.get)
print "largest degree: ", degree_counts[sorted_nodes[0]]
# repeatedly delete all nodes with degrees < k to find the k core
# if we run out of nodes, or the largest count is < k we should stop
while ((len(sorted_nodes) > 0) and (degree_counts[sorted_nodes[0]]<k)):
# collect nodes with degrees < k in to_delete
to_delete = set()
for node in sorted_nodes:
if degree_counts[node]<k:
to_delete.add(node)
else:
break
# delete all edges that include those nodes
edges = delete_node(edges, to_delete)
print "# of edges left:",len(edges)
# recount the degrees for this (smaller) graph
degree_counts = get_degrees(edges)
# resort the nodes
sorted_nodes = sorted(degree_counts, key = degree_counts.get)
return edges, sorted_nodes
core_edges, core_nodes=kcore(edges, 3)
# We can use this method to create
# an adjacency matrix to represent the graph
def build_neighborhood(edges, nodes):
neighborhood = {}
for node in nodes:
# create a place to store the neighbors
neighborhood[node]=set()
for edge in edges:
# if either side of the edge contains node
# add the other side as a neighbor
if node == edge[0]:
neighborhood[node].add(edge[1])
if node == edge[1]:
neighborhood[node].add(edge[0])
return neighborhood
# This method is used to discover the connected components
# The basic idea is Breadth First Search
# We start from a node and find all the nodes it can reach
# In this way we can get a cluster of nodes which is called
# a connected component
# to start, we pass in the edges, the neighborhood map and the set of nodes
def get_connected_components(edges, neighborhood, nodes):
result = []
nodes = set(nodes)
# keep track of what we've seen
visited = set()
# loop until there are no more nodes
while nodes:
# grab the first one
node = nodes.pop()
# create a new set for it
component = set()
# start searching from node
queue = [node]
while queue:
# pick a node and mark as visited
node = queue.pop(0)
visited.add(node)
# add it to our connected component
component.add(node)
# find all its neighbors
neighbors = neighborhood[node]
# add them to the queue (if we haven't seen them before)
for neighbor in neighbors:
if neighbor not in visited:
nodes.discard(neighbor)
queue.append(neighbor)
result.append(component)
return result
neighborhood = build_neighborhood(core_edges, core_nodes)
ret = get_connected_components(core_edges, neighborhood, core_nodes)
print "# of connected components",len(ret)
import networkx as nx
from networkx.readwrite import json_graph
import json
# create a graph and add al the edges
G=nx.Graph()
for edge in edges:
G.add_edge(edge[0],edge[1])
nld = json_graph.node_link_data(G)
# We store the data in a json file
# So the javascript code can read it
json.dump(nld, open('force.json','w'))
from IPython.display import IFrame
# IPython Notebook can serve files and display them into
# inline frames. Prepend the path with the 'files' prefix.
viz_file = 'force.html'
IFrame(viz_file, width=700, height=550)
# code to analyze undirected graphs
%matplotlib inline
from pylab import *
import matplotlib.pyplot as plt
# get the degrees for each node (again)
nodes = get_degrees(edges)
v = nodes.values()
# this ensures that we don't have any values more than once
noRep = list(set(v))
noRep.sort()
x = []
y = []
for count in noRep:
# f is the number of times this value occurs
f = v.count(count)
x.append(count)
y.append(f)
figure()
loglog(x, y, '*')
xlabel('x')
ylabel('y')
title('power law plot')
show()
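# Optional: a rough power-law exponent estimate from a straight-line fit in log-log space.
# Illustrative only -- a rigorous fit would use maximum likelihood (e.g. the powerlaw package).
slope, intercept = polyfit(log10(x), log10(y), 1)
print "estimated exponent (log-log slope):", slope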
# code to analyze directed graphs
file = open("twitter.txt")
lines = file.readlines()
edges = set()
nodes_indegree = {}
nodes_outdegree = {}
# construct the indegree info and edges
# very similar to what we did for the undirected graph above
for line in lines:
data = line.split()
source = int(data[0])
endpoint = int(data[1])
# add the edge
edges.add((source, endpoint))
# update the count for the number of times we've seen each node
nodes_indegree[source] = nodes_indegree.get(source, -1) + 1
nodes_outdegree[endpoint] = nodes_outdegree.get(endpoint, -1) + 1
%matplotlib inline
from pylab import *
import matplotlib.pyplot as plt
# now show this to the viewer
v_indegree = nodes_indegree.values()
v_outdegree = nodes_outdegree.values()
noRep_indegree = list(set(v_indegree))
noRep_outdegree = list(set(v_outdegree))
noRep_indegree.sort()
noRep_outdegree.sort()
x_indegree = []
y_indegree = []
x_outdegree = []
y_outdegree = []
for count in noRep_indegree:
f = v_indegree.count(count)
x_indegree.append(count)
y_indegree.append(f)
for count in noRep_outdegree:
f = v_outdegree.count(count)
x_outdegree.append(count)
y_outdegree.append(f)
figure()
loglog(x_indegree, y_indegree, '*')
xlabel('x')
ylabel('y')
title('indegree distribution')
show()
figure()
loglog(x_outdegree, y_outdegree, '*')
xlabel('x')
ylabel('y')
title('outdegree distribution')
show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, let's use OpenMM to run some dynamics on the 3D potential energy function
Step2: Okay, let's run the dynamics. The first plot below shows the $x$, $y$ and $z$ coordinate vs. time for the trajectory, and
Step3: Note that the variance of $x$ is much lower than the variance in $y$ or $z$, despite its bi-modal distribution.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import simtk.openmm as mm
from msmbuilder.decomposition import tICA, PCA
def propagate(n_steps=10000):
"Simulate some dynamics"
system = mm.System()
system.addParticle(1)
force = mm.CustomExternalForce('5*(x-1)^2*(x+1)^2 + y^2 + z^2')
force.addParticle(0, [])
system.addForce(force)
integrator = mm.LangevinIntegrator(500, 1, 0.02)
context = mm.Context(system, integrator)
context.setPositions([[0, 0, 0]])
context.setVelocitiesToTemperature(500)
x = np.zeros((n_steps, 3))
for i in range(n_steps):
x[i] = context.getState(getPositions=True).getPositions(asNumpy=True)._value
integrator.step(1)
return x
trajectory = propagate(10000)
ylabels = ['x', 'y', 'z']
for i in range(3):
plt.subplot(3, 1, i+1)
plt.plot(trajectory[:, i])
plt.ylabel(ylabels[i])
plt.xlabel('Simulation time')
plt.show()
# fit the two models
tica = tICA(n_components=1, lag_time=100)
pca = PCA(n_components=1)
tica.fit([trajectory])
pca.fit([trajectory])
plt.subplot(1,2,1)
plt.title('1st tIC')
plt.bar([1,2,3], tica.components_[0], color='b')
plt.xticks([1.5,2.5,3.5], ['x', 'y', 'z'])
plt.subplot(1,2,2)
plt.title('1st PC')
plt.bar([1,2,3], pca.components_[0], color='r')
plt.xticks([1.5,2.5,3.5], ['x', 'y', 'z'])
plt.show()
print('1st tIC', tica.components_ / np.linalg.norm(tica.components_))
print('1st PC ', pca.components_ / np.linalg.norm(pca.components_))
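# Optional illustration: project the trajectory onto the leading tIC and PC.
# tICA should recover the slow, bimodal x coordinate, while PCA favours the high-variance directions.
tica_traj = tica.transform([trajectory])[0]
pca_traj = pca.transform([trajectory])[0]
plt.hist(tica_traj[:, 0], bins=50, alpha=0.5, label='1st tIC')
plt.hist(pca_traj[:, 0], bins=50, alpha=0.5, label='1st PC')
plt.legend()
plt.show()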
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data_set variable is an NwbDataSet instance, which has some methods we can use to access the injected current stimulus waveform and the voltage response waveform for all experimental sweeps. Let's pull one sweep out and plot it.
Step2: Cell Morphology Reconstructions
Step3: The AllenSDK contains a module that makes it easier to work with the SWC files. We'll see how the data is contained in the file by looking at the first node.
Step4: Note that the type field refers to the type of neuronal compartment. The values can be 1 for the soma, 2 for the axon, 3 for dendrites, and 4 for apical dendrites (if present).
Step5: Electrophysiology Features
Step6: That's how to get all the ephys features for a given specimen - what if we want a particular feature for all cells?
Step7: Let's use numpy to fit a regression line to these data and plot it.
Step8: It looks like there may be roughly two clusters in the data above. Maybe they relate to whether the cells are presumably excitatory (spiny) cells or inhibitory (aspiny) cells. Let's query the API and split up the two sets to see.
Step9: Morphology Features
Step10: Computing Electrophysiology Features
Step11: A list comprehension is an easy way to pull out the spike times.
|
<ASSISTANT_TASK:>
Python Code:
from allensdk.core.cell_types_cache import CellTypesCache
# Instantiate the CellTypesCache instance. The manifest_file argument
# tells it where to store the manifest, which is a JSON file that tracks
# file paths. If you supply a relative path (like this), it will go
# into your current working directory
ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
# this saves the NWB file to 'cell_types/specimen_464212183/ephys.nwb'
data_set = ctc.get_ephys_data(464212183)
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
sweep_number = 30
sweep_data = data_set.get_sweep(sweep_number)
index_range = sweep_data["index_range"]
i = sweep_data["stimulus"][0:index_range[1]+1] # in A
v = sweep_data["response"][0:index_range[1]+1] # in V
i *= 1e12 # to pA
v *= 1e3 # to mV
sampling_rate = sweep_data["sampling_rate"] # in Hz
t = np.arange(0, len(v)) * (1.0 / sampling_rate)
plt.style.use('ggplot')
fig, axes = plt.subplots(2, 1, sharex=True)
axes[0].plot(t, v, color='black')
axes[1].plot(t, i, color='gray')
axes[0].set_ylabel("mV")
axes[1].set_ylabel("pA")
axes[1].set_xlabel("seconds")
from allensdk.core.cell_types_cache import CellTypesCache
ctc = CellTypesCache()
# this downloads metadata for all cells with reconstructions
cells = ctc.get_cells(require_reconstruction=True)
print "Cells with reconstructions: ", len(cells)
# download and open an SWC file
cell_id = 464212183
morphology = ctc.get_reconstruction(cell_id)
print morphology
compartment_list = morphology.compartment_list
print compartment_list[0]
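# Optional: tally compartments by type (1=soma, 2=axon, 3=dendrite, 4=apical dendrite),
# matching the description above; purely illustrative.
from collections import Counter
print Counter(c['type'] for c in compartment_list)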
fig, axes = plt.subplots(1, 2, sharey=True, sharex=True)
axes[0].set_aspect('equal', 'box-forced')
axes[1].set_aspect('equal', 'box-forced')
# Make a line drawing of x-y and y-z views
for n in morphology.compartment_list:
child_nodes = [c for c in morphology.compartment_list if c['id'] in n['children']]
for c in child_nodes:
axes[0].plot([n['x'], c['x']], [n['y'], c['y']], color='black')
axes[1].plot([n['z'], c['z']], [n['y'], c['y']], color='black')
axes[0].set_ylabel('y')
axes[0].set_xlabel('x')
axes[1].set_xlabel('z')
# download all electrophysiology features for all cells
ephys_features = ctc.get_ephys_features()
# filter down to a specific cell
specimen_id = 464212183
cell_ephys_features = [f for f in ephys_features if f['specimen_id'] == specimen_id]
print cell_ephys_features
updown = np.array([f['upstroke_downstroke_ratio_short_square'] for f in ephys_features], dtype=float)
fasttrough = np.array([f['fast_trough_v_long_square'] for f in ephys_features], dtype=float)
plt.figure()
plt.scatter(fasttrough, updown, color='#2ca25f')
plt.ylabel("upstroke-downstroke ratio")
plt.xlabel("fast trough depth (mV)")
A = np.vstack([fasttrough, np.ones_like(updown)]).T
print "First 5 rows of A:"
print A[:5, :]
m, c = np.linalg.lstsq(A, updown)[0]
print "m", m, "c", c
plt.figure()
plt.scatter(fasttrough, updown, color='#2ca25f')
plt.plot(fasttrough, m * fasttrough + c, c='gray')
plt.ylabel("upstroke-downstroke ratio")
plt.xlabel("fast trough depth (mV)")
cells = ctc.get_cells()
cell_index = { c['id']: c for c in cells}
dendrite_types = ['spiny', 'aspiny']
data = {}
# group fast trough depth and upstroke downstroke ratio values by cell dendrite type
for dendrite_type in dendrite_types:
type_features = [f for f in ephys_features if cell_index[f['specimen_id']]['dendrite_type'] == dendrite_type]
data[dendrite_type] = {
"fasttrough": [f['fast_trough_v_long_square'] for f in type_features],
"updown": [f['upstroke_downstroke_ratio_short_square'] for f in type_features],
}
plt.figure()
for a_type, color in zip(dendrite_types, ["#d95f02", "#7570b3"]):
plt.scatter(data[a_type]['fasttrough'], data[a_type]['updown'], color=color, label=a_type)
plt.legend(loc='best')
plt.ylabel("upstroke-downstroke ratio")
plt.xlabel("fast trough depth (mV)")
# download all morphology features for cells with reconstructions
morphology_features = ctc.get_morphology_features()
# or download both morphology and ephys features
# this time we'll ask the cache to return a pandas dataframe
all_features = ctc.get_all_features(dataframe=True, require_reconstruction=True)
all_features
from allensdk.ephys.feature_extractor import EphysFeatureExtractor
sweep_number = 35
sweep_data = data_set.get_sweep(sweep_number)
index_range = sweep_data["index_range"]
i = sweep_data["stimulus"][0:index_range[1]+1] # in A
v = sweep_data["response"][0:index_range[1]+1] # in V
i *= 1e12 # to pA
v *= 1e3 # to mV
sampling_rate = sweep_data["sampling_rate"] # in Hz
t = np.arange(0, len(v)) * (1.0 / sampling_rate)
fx = EphysFeatureExtractor()
stim_start = 1.0
stim_duration = 1.0
fx.process_instance("", v, i, t, stim_start, stim_duration, "")
feature_data = fx.feature_list[0].mean
print "Avg spike width: {:.2f} ms".format(feature_data['width'])
print "Avg spike threshold: {:.1f} mV".format(feature_data["threshold"])
import pprint
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(feature_data["spikes"][0])
spike_times = [s["t"] for s in feature_data["spikes"]]
print spike_times[:5]
plt.figure()
plt.plot(t, v, color='black')
min_v = v.min()
min_v -= 5.0
plt.scatter(spike_times, np.ones(len(spike_times)) * min_v, c='r')
plt.xlim(0.9, 1.2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's display the look up table with mapping from class number to the name of the PASCAL VOC class
Step2: Now, let's create a contour for our segmentation to make it look like an actual sticker.
Step3: Now, let's repeat the same thing for another image. I will duplicate the code, because I am lazy.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import division
import os
import sys
import tensorflow as tf
import skimage.io as io
import numpy as np
sys.path.append("/home/aakash-sinha/Documents/Tensorflow/tf-image-segmentation/")
sys.path.append("/home/aakash-sinha/Documents/Tensorflow/models/slim/")
fcn_16s_checkpoint_path = '/home/aakash-sinha/Documents/Tensorflow/tf-image-segmentation/tf_image_segmentation/models/fcn_8s_checkpoint/model_fcn8s_final.ckpt'
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
slim = tf.contrib.slim
from tf_image_segmentation.models.fcn_8s import FCN_8s
from tf_image_segmentation.utils.inference import adapt_network_for_any_size_input
from tf_image_segmentation.utils.pascal_voc import pascal_segmentation_lut
number_of_classes = 21
#image_filename = 'me.jpg'
image_filename = '1.jpg'
# image_filename = 'cat.jpg'
# image_filename = 'small_cat.jpg'
image_filename_placeholder = tf.placeholder(tf.string)
feed_dict_to_use = {image_filename_placeholder: image_filename}
image_tensor = tf.read_file(image_filename_placeholder)
image_tensor = tf.image.decode_jpeg(image_tensor, channels=3)
# Fake batch for image and annotation by adding
# leading empty axis.
image_batch_tensor = tf.expand_dims(image_tensor, axis=0)
# Be careful: after adaptation, network returns final labels
# and not logits
FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32)
pred, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch_tensor,
number_of_classes=number_of_classes,
is_training=False)
# The op for initializing the variables.
initializer = tf.local_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(initializer)
saver.restore(sess, "/home/aakash-sinha/Documents/Tensorflow/tf-image-segmentation/tf_image_segmentation/models/fcn_8s_checkpoint/model_fcn8s_final.ckpt")
image_np, pred_np = sess.run([image_tensor, pred], feed_dict=feed_dict_to_use)
io.imshow(image_np)
io.show()
io.imshow(pred_np.squeeze())
io.show()
pascal_segmentation_lut()
# Eroding countour
import skimage.morphology
prediction_mask = (pred_np.squeeze() == 15)
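# class 15 corresponds to 'person' in the PASCAL VOC lookup table shown above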
# Let's apply some morphological operations to
# create the contour for our sticker
cropped_object = image_np * np.dstack((prediction_mask,) * 3)
square = skimage.morphology.square(5)
temp = skimage.morphology.binary_erosion(prediction_mask, square)
negative_mask = (temp != True)
eroding_countour = negative_mask * prediction_mask
eroding_countour_img = np.dstack((eroding_countour, ) * 3)
cropped_object[eroding_countour_img] = 248
png_transparancy_mask = np.uint8(prediction_mask * 255)
image_shape = cropped_object.shape
png_array = np.zeros(shape=[image_shape[0], image_shape[1], 4], dtype=np.uint8)
png_array[:, :, :3] = cropped_object
png_array[:, :, 3] = png_transparancy_mask
io.imshow(cropped_object)
io.imsave('output_image.png', png_array)
%matplotlib inline
from __future__ import division
import os
import sys
import tensorflow as tf
import skimage.io as io
import numpy as np
sys.path.append("tf-image-segmentation/")
sys.path.append("/home/dpakhom1/workspace/my_models/slim/")
fcn_16s_checkpoint_path = '/home/dpakhom1/tf_projects/segmentation/model_fcn8s_final.ckpt'
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
slim = tf.contrib.slim
from tf_image_segmentation.models.fcn_8s import FCN_8s
from tf_image_segmentation.utils.inference import adapt_network_for_any_size_input
from tf_image_segmentation.utils.pascal_voc import pascal_segmentation_lut
number_of_classes = 21
image_filename = 'small_cat.jpg'
image_filename_placeholder = tf.placeholder(tf.string)
feed_dict_to_use = {image_filename_placeholder: image_filename}
image_tensor = tf.read_file(image_filename_placeholder)
image_tensor = tf.image.decode_jpeg(image_tensor, channels=3)
# Fake batch for image and annotation by adding
# leading empty axis.
image_batch_tensor = tf.expand_dims(image_tensor, axis=0)
# Be careful: after adaptation, network returns final labels
# and not logits
FCN_8s = adapt_network_for_any_size_input(FCN_8s, 32)
pred, fcn_16s_variables_mapping = FCN_8s(image_batch_tensor=image_batch_tensor,
number_of_classes=number_of_classes,
is_training=False)
# The op for initializing the variables.
initializer = tf.local_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(initializer)
saver.restore(sess, "/home/dpakhom1/tf_projects/segmentation/model_fcn8s_final.ckpt")
image_np, pred_np = sess.run([image_tensor, pred], feed_dict=feed_dict_to_use)
io.imshow(image_np)
io.show()
io.imshow(pred_np.squeeze())
io.show()
# Eroding countour
import skimage.morphology
prediction_mask = (pred_np.squeeze() == 8)
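# class 8 corresponds to 'cat' in the PASCAL VOC lookup table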
# Let's apply some morphological operations to
# create the contour for our sticker
cropped_object = image_np * np.dstack((prediction_mask,) * 3)
square = skimage.morphology.square(5)
temp = skimage.morphology.binary_erosion(prediction_mask, square)
negative_mask = (temp != True)
eroding_countour = negative_mask * prediction_mask
eroding_countour_img = np.dstack((eroding_countour, ) * 3)
cropped_object[eroding_countour_img] = 248
png_transparancy_mask = np.uint8(prediction_mask * 255)
image_shape = cropped_object.shape
png_array = np.zeros(shape=[image_shape[0], image_shape[1], 4], dtype=np.uint8)
png_array[:, :, :3] = cropped_object
png_array[:, :, 3] = png_transparancy_mask
io.imshow(cropped_object)
io.imsave('sticker_cat.png', png_array)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Word counting
Step4: Write a function count_words that takes a list of words and returns a dictionary where the keys in the dictionary are the unique words in the list and the values are the word counts.
Step6: Write a function sort_word_counts that returns a list of sorted word counts
Step7: Perform a word count analysis on Chapter 1 of Moby Dick, whose text can be found in the file mobydick_chapter1.txt
Step8: Create a "Cleveland Style" dotplot of the counts of the top 50 words using Matplotlib. If you don't know what a dotplot is, you will have to do some research...
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
def tokenize(s, stop_words=None, punctuation='`~!@#$%^&*()_-+={[}]|\:;"<,>.?/}\t'):
Split a string into a list of words, removing punctuation and stop words.
w = []
for line in s.splitlines(): #uses the splitlines function to split each of the lines in the input
w.extend(line.split(' '))
w = [''.join(filter(lambda c: c not in punctuation, word)) for word in w] # filters out the punctuation
if isinstance(stop_words, str):
stop_words = stop_words.split(' ')
if stop_words is not None: #calls the if statment to pull out the words if they are not nothing
w = [word for word in w if word not in stop_words]
w = [word.lower() for word in w if word]
return w #ends the function
tokenize(s='no @ the range, randy .. is !a',stop_words='the', punctuation='`~!@#$%^&*()_-+={[}]|\:;"<,>.?/}\t') #my check to see if it was working
assert tokenize("This, is the way; that things will end", stop_words=['the', 'is']) == \
['this', 'way', 'that', 'things', 'will', 'end']
wasteland =
APRIL is the cruellest month, breeding
Lilacs out of the dead land, mixing
Memory and desire, stirring
Dull roots with spring rain.
assert tokenize(wasteland, stop_words='is the of and') == \
['april','cruellest','month','breeding','lilacs','out','dead','land',
'mixing','memory','desire','stirring','dull','roots','with','spring',
'rain']
def count_words(data):
wc = {}
for word in data:
if word in wc:
wc[word] += 1
else:
wc[word] = 1
return wc
assert count_words(tokenize('this and the this from and a a a')) == \
{'a': 3, 'and': 2, 'from': 1, 'the': 1, 'this': 2}
def sort_word_counts(wc):
Return a list of 2-tuples of (word, count), sorted by count descending.
return list(sorted(wc.items(),key=lambda x:x[1],reverse=True))
assert sort_word_counts(count_words(tokenize('this and a the this this and a a a'))) == \
[('a', 4), ('this', 3), ('and', 2), ('the', 1)]
with open('mobydick_chapter1.txt') as ishmael:
raw=ishmael.read()
l=tokenize(raw,stop_words='the of and a to in is it that as')
c=count_words(l)
swc=sort_word_counts(c)
assert swc[0]==('i',43)
assert len(swc)==848
# Cleveland-style dotplot of the 50 most common words
words, counts = zip(*swc[:50])
plt.plot(counts, range(len(counts)), 'ko')
plt.yticks(range(len(words)), words)
plt.xlabel('word count')
plt.title('Top 50 words in Moby Dick, Chapter 1')
assert True # use this for grading the dotplot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MPI Modes
Step2: PHOEBE determines whether the current script is running within an MPI environment by checking for environment variables set by mpirun/mpiexec. If you run into any issues with PHOEBE not behaving as expected, check to see whether PHOEBE thinks it's within mpirun.
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.3,<2.4"
import phoebe
print(phoebe.mpi.enabled)
print(phoebe.mpi.mode)
phoebe.mpi_on()
print(phoebe.mpi.enabled)
print(phoebe.mpi.mode)
print(phoebe.mpi.myrank)
print(phoebe.mpi.nprocs)
print(phoebe.mpi.within_mpirun)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Suppose we compute PageRank with a β of 0.7, and we introduce the additional constraint that the sum of the PageRanks of the three pages must be 3, to handle the problem that otherwise any multiple of a solution will also be a solution. Compute the PageRanks a, b, and c of the three pages A, B, and C, respectively.
Step2: Question 2
Step3: Suppose we compute PageRank with β=0.85. Write the equations for the PageRanks a, b, and c of the three pages A, B, and C, respectively. Then, identify the correct equations representing a, b and c.
Step4: Question 3
Step5: Assuming no "taxation," compute the PageRanks a, b, and c of the three pages A, B, and C, using iteration, starting with the "0th" iteration where all three pages have rank a = b = c = 1. Compute as far as the 5th iteration, and also determine what the PageRanks are in the limit.
Step6: Question 4
Step7: Question 5
Step8: TSPR(1) = 0.3576
Step9: There, k "second-tier" nodes act as intermediaries. The target page t has only to link to the k second-tier pages, and each of those pages links to m/k of the m supporting pages. Each of the supporting pages links only to t (although most of these links are not shown). Suppose the taxation parameter is β = 0.85, and x is the amount of PageRank supplied from outside to the target page. Let n be the total number of pages in the Web. Finally, let y be the PageRank of target page t. If we compute the formula for y in terms of k, m, and n, we get a formula with the form
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
Image(filename='pagerank1.jpeg')
import numpy as np
# Adjacency matrix
# m1 = [ 0, 0, 0]
# [0.5, 0, 0]
# [0.5, 1, 1]
m1 = np.matrix([[0, 0, 0],[0.5, 0, 0],[0.5, 1, 1]])
beta = 0.7
# r = beta * m1 * r + ((1-beta)/N)
def r_p(r):
return beta * m1 * r + np.matrix([0.1,0.1,0.1]).T
r = np.matrix([1.0/3,1.0/3,1.0/3]).T
for i in range(1000):
r = r_p(r)
print "Final PageRank: \n" + str(r*3)
a = r[0] * 3
b = r[1] * 3
c = r[2] * 3
print 'a = ', a
print 'b = ', b
print 'c = ', c
print 'a + b = ', a + b
print 'b + c = ', b + c
print 'a + c = ', a + c
Image(filename='pagerank2.jpeg')
import numpy as np
# Adjacency matrix
# m2 = [ 0, 0, 1]
# [0.5, 0, 0]
# [0.5, 1, 0]
m2 = np.matrix([[0, 0, 1],[0.5, 0, 0],[0.5, 1, 0]])
beta =0.85
def r_p(r):
return beta * m2 * r + np.matrix([0.05,0.05,0.05]).T
r = np.matrix([1.0/3,1.0/3,1.0/3]).T
for i in range(1000):
r = r_p(r)
print "Final PageRank: \n" + str(r)
a = r[0]
b = r[1]
c = r[2]
print "0.95a = ", 0.95*a, "= 0.9c + 0.05b = ", 0.9*c + 0.05*b
print "0.95b = ", 0.95*b, "= 0.475a + 0.05c = ", 0.475*a + 0.05*c
print "0.95c = ", 0.95*c, "= 0.9b + 0.475a = ", 0.9*b + 0.475*a
Image(filename='pagerank2.jpeg')
import numpy as np
# Adjacency matrix
# m3 = [ 0, 0, 1]
# [0.5, 0, 0]
# [0.5, 1, 0]
m3 = np.matrix([[0, 0, 1],[0.5, 0, 0],[0.5, 1, 0]])
beta = 1
r = np.matrix([1,1,1]).T
for i in range(50):
r = m3.dot(r)
print i+1
print r
print "Final PageRank: \n" + str(r)
Image(filename='pagerank4.jpg')
import numpy as np
# Function to normalize all values so that the largest value is 1
def norm(Matrix):
return Matrix/float(Matrix.max())
def estimate(L,h):
# Estimate the authority vector: a = L^T h
#a = L.T*h
a = np.dot(L.T, h)
# Normalize a by dividing all values so the largest value is 1
a = norm(a)
# Estimate the hubbiness vector: h = L a
#h = L*a
h = np.dot(L, a)
# Normalize h by dividing all values so the largest value is 1
h = norm(h)
return a,h
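# Iterating these two normalized updates converges to the principal eigenvectors of
# L^T L (authority) and L L^T (hubbiness); the repeated estimate() calls below approximate this.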
# The vector h is (the transpose of) [1,1,1,1]
h = np.matrix([1,1,1,1]).T
# The link graph: 1->2; 1->3; 2->1; 3->4; 4->3
L = np.matrix([[0,1,1,0],
[1,0,0,0],
[0,0,0,1],
[0,0,1,0]])
# After step 1
a,h = estimate(L,h)
print "After step 1:"
print "authority:", np.round(a.T, decimals=3)
print "hubbiness:", np.round(h.T, decimals=3)
# After step 2 (repeat of step 1)
a,h = estimate(L,h)
print "Final estimate:"
print "authority:", np.round(a.T, decimals=3)
print "hubbiness:", np.round(h.T, decimals=3)
Image(filename='pagerank4.jpg')
import numpy as np
A = np.matrix([[ 0.0, 0.5, 0.5, 0.0 ],
[ 1.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, 1.0 ],
[ 0.0, 0.0, 1.0, 0.0 ],]).T
w = 1.0/3.0
B = np.matrix([[2*w, 2*w, 2*w, 2*w],
[w, w, w, w],
[0, 0, 0, 0],
[0, 0, 0, 0]])
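# B encodes the teleport (taxation) distribution for topic-sensitive PageRank:
# the first two pages share the (1 - beta) mass in a 2:1 ratio, the last two receive none.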
beta = 0.7
r = np.ones((A.shape[0], 1)) / A.shape[0]
for i in range(50):
r = beta * np.dot(A, r) + (1 - beta) * np.dot(B, r)
print i+1
print r
Image(filename='pagerank5.jpeg')
import numpy as np
import math
beta = 0.85
a = 1.0/ (1 - np.power(beta, 3))
b = beta / (1.0 + beta + np.power(beta, 2))
c = np.power(beta, 2)/ (1.0 + beta + np.power(beta, 2))
print 'a = %f , b = %f , c = %f' % (a, b, c)
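# These coefficients give the target page's PageRank in the form
#   y = a*x + b*(m/n) + c*(k/n)
# i.e. y = x/(1 - beta^3) + (beta/(1 + beta + beta^2))*(m/n) + (beta^2/(1 + beta + beta^2))*(k/n).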
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Query methods
Step2: Get available months in a year
Step3: Get available days in a given year and month
Step4: Get available radars in a given year, month, and day
Step5: Query for available scans
Step6: Get all scans for a radar between a start and end time
Step7: Downloading Files
Step8: The download method returns a DownloadResult object. The success attribute returns a list of LocalNexradFile objects that were successfully downloaded. There is also an iter_success method that creates a generator for easily looping through the objects.
Step9: You can check for any failed downloads using the failed_count attribute. You can get a list of the failed AwsNexradFile objects by calling the failed attribute. There is also a generator method called iter_failed that can be used to loop through the failed objects.
Step10: Working with LocalNexradFile objects
Step11: Now let's plot velocity data for the same scans.
|
<ASSISTANT_TASK:>
Python Code:
import nexradaws
# Imports assumed from earlier cells of the original notebook (not shown here):
import tempfile
from datetime import datetime
import pytz
import pyart
import matplotlib.pyplot as plt
templocation = tempfile.mkdtemp()  # scratch directory used for the downloads below
conn = nexradaws.NexradAwsInterface()
years = conn.get_avail_years()
print(years)
months = conn.get_avail_months('2013')
print(months)
days = conn.get_avail_days('2013','05')
print(days)
radars = conn.get_avail_radars('2013','05','31')
print(radars)
availscans = conn.get_avail_scans('2013', '05', '31', 'KTLX')
print("There are {} NEXRAD files available for May 31st, 2013 for the KTLX radar.\n".format(len(availscans)))
print(availscans[0:4])
central_timezone = pytz.timezone('US/Central')
radar_id = 'KTLX'
start = central_timezone.localize(datetime(2013,5,31,17,0))
end = central_timezone.localize (datetime(2013,5,31,19,0))
scans = conn.get_avail_scans_in_range(start, end, radar_id)
print("There are {} scans available between {} and {}\n".format(len(scans), start, end))
print(scans[0:4])
results = conn.download(scans[0:4], templocation)
print(results.success)
for scan in results.iter_success():
print ("{} volume scan time {}".format(scan.radar_id,scan.scan_time))
print("{} downloads failed.".format(results.failed_count))
print(results.failed)
fig = plt.figure(figsize=(16,12))
for i,scan in enumerate(results.iter_success(),start=1):
ax = fig.add_subplot(2,2,i)
radar = scan.open_pyart()
display = pyart.graph.RadarDisplay(radar)
display.plot('reflectivity',0,ax=ax,title="{} {}".format(scan.radar_id,scan.scan_time))
display.set_limits((-150, 150), (-150, 150), ax=ax)
fig = plt.figure(figsize=(16,12))
for i,scan in enumerate(results.iter_success(),start=1):
ax = fig.add_subplot(2,2,i)
radar = scan.open_pyart()
display = pyart.graph.RadarDisplay(radar)
display.plot('velocity',1,ax=ax,title="{} {}".format(scan.radar_id,scan.scan_time))
display.set_limits((-150, 150), (-150, 150), ax=ax)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The AstroPy package - QTable
Step2: Renaming columns
Step3: Sorting
Step4: Masking
Step5: Adding a column to the Table
Step6: Saving a table
Step7: The Pandas package - DataFrame
Step8: Renaming columns
Step9: Sorting
Step10: Masking
Step11: Adding a column to the Table
Step12: Saving a table
Step13: QTables vs. DataFrames
Step14: Messy Data
Step15: Skip the header
Step16: NaN = Not_A_Number, python's null value
Step17: Option 2 - Add the column names
Step18: Deal with the missing data with fillna()
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
from astropy.table import QTable
os.listdir()
planet_table = QTable.read('Planets.csv', format='ascii.csv')
planet_table
print(planet_table)
planet_table.rename_column('col2', 'ecc')
print(planet_table)
planet_table['Name']
planet_table['Name'][0]
planet_table.sort(['ecc'])
planet_table
planet_table.sort(['a']) # re-sort our table
mask1 = np.where(planet_table['a'] > 5)
mask1
planet_table[mask1]
mask2 = ((planet_table['a'] > 5) &
(planet_table['ecc'] < 0.05))
planet_table[mask2]
perihelion = planet_table['a'] * (1.0 - planet_table['ecc'])
perihelion
planet_table['Peri'] = perihelion
planet_table
planet_table.write('NewPlanets.csv', format='ascii.csv')
os.listdir()
import pandas as pd
planet_table2 = pd.read_csv('Planets.csv')
planet_table2
print(planet_table2)
planet_table2.rename(columns={'Unnamed: 2': 'ecc'}, inplace=True)
planet_table2
planet_table2['Name']
planet_table['Name'][0]
planet_table2.sort_values(['ecc'])
planet_table2
planet_table2.sort_values(['ecc'], ascending=False)
mask3 = planet_table['a'] > 5
mask3
planet_table2[mask3]
mask4 = ((planet_table2['a'] > 5) &
(planet_table2['ecc'] < 0.05))
planet_table2[mask4]
perihelion = planet_table2['a'] * (1.0 - planet_table2['ecc'])
perihelion
planet_table2['Peri'] = perihelion
planet_table2
planet_table2.to_csv('NewPlanets2.csv', index=False)
os.listdir()
import datetime
doctor_table = pd.read_csv('Doctor.csv')
doctor_table
doctor_table.sort_values(['BirthDate'])
doctor_table['BirthDate'] = pd.to_datetime(doctor_table['BirthDate'])
doctor_table.sort_values(['BirthDate'])
today = datetime.date.today()
today
age = today - doctor_table['BirthDate']
age
doctor_table['AgeToday'] = age / np.timedelta64(1, 'Y')
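# dividing the timedelta column by np.timedelta64(1, 'Y') converts it to fractional years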
doctor_table
doctor_table.describe()
messy_table = pd.read_csv('Mess.csv')
messy_table = pd.read_csv('Mess.csv', skiprows = 6)
messy_table
messy_table = pd.read_csv('Mess.csv', skiprows = 6, header= None)
messy_table
cols = ["Name", "Size"]
messy_table = pd.read_csv('Mess.csv', skiprows = 6, names = cols)
messy_table
messy_table['Name'].fillna("unknown", inplace=True)
messy_table['Size'].fillna(999.0, inplace=True)
messy_table
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Risk Factor Models
Step2: We then define a market environment containing the major parameter specifications needed.
Step3: Next, we define the model object for the first risk factor, based on the geometric Brownian motion (Black-Scholes-Merton (1973) model).
Step4: Some paths visualized.
Step5: Second risk factor with higher volatility. We overwrite the respective value in the market environment.
Step6: Valuation Models
Step7: The first derivative is an American put option on the first risk factor gbm_1.
Step8: Let us calculate a Monte Carlo present value estimate and estimates for the major Greeks.
Step9: The second derivative is a European call option on the second risk factor gbm_2.
Step10: Valuation and Greek estimation for this option.
Step11: Options Portfolio
Step12: To compose a portfolio consisting of our just defined options, we need to define derivatives positions. Note that this step is independent from the risk factor model and option model definitions. We only use the market environment data and some additional information needed (e.g. payoff functions).
Step13: Let us define the relevant market by 2 Python dictionaries, the correlation between the two risk factors and a valuation environment.
Step14: These are used to define the derivatives portfolio.
Step15: Simulation and Valuation
Step16: Via the get_statistics methods delta and vega values are provided as well.
Step17: Much more complex scenarios are possible with DX Analytics
|
<ASSISTANT_TASK:>
Python Code:
import dx
import datetime as dt
import pandas as pd
import seaborn as sns; sns.set()
r = dx.constant_short_rate('r', 0.01)
me_1 = dx.market_environment('me', dt.datetime(2015, 1, 1))
me_1.add_constant('initial_value', 100.)
# starting value of simulated processes
me_1.add_constant('volatility', 0.2)
# volatility factor
me_1.add_constant('final_date', dt.datetime(2016, 6, 30))
# horizon for simulation
me_1.add_constant('currency', 'EUR')
# currency of instrument
me_1.add_constant('frequency', 'W')
# frequency for discretization
me_1.add_constant('paths', 10000)
# number of paths
me_1.add_curve('discount_curve', r)
# discount curve
gbm_1 = dx.geometric_brownian_motion('gbm_1', me_1)
pdf = pd.DataFrame(gbm_1.get_instrument_values(), index=gbm_1.time_grid)
%matplotlib inline
pdf.ix[:, :10].plot(legend=False, figsize=(10, 6))
me_2 = dx.market_environment('me_2', me_1.pricing_date)
me_2.add_environment(me_1) # add complete environment
me_2.add_constant('volatility', 0.5) # overwrite value
gbm_2 = dx.geometric_brownian_motion('gbm_2', me_2)
pdf = pd.DataFrame(gbm_2.get_instrument_values(), index=gbm_2.time_grid)
pdf.ix[:, :10].plot(legend=False, figsize=(10, 6))
me_opt = dx.market_environment('me_opt', me_1.pricing_date)
me_opt.add_environment(me_1)
me_opt.add_constant('maturity', dt.datetime(2016, 6, 30))
me_opt.add_constant('strike', 110.)
am_put = dx.valuation_mcs_american_single(
name='am_put',
underlying=gbm_1,
mar_env=me_opt,
payoff_func='np.maximum(strike - instrument_values, 0)')
am_put.present_value()
am_put.delta()
am_put.vega()
eur_call = dx.valuation_mcs_european_single(
name='eur_call',
underlying=gbm_2,
mar_env=me_opt,
payoff_func='np.maximum(maturity_value - strike, 0)')
eur_call.present_value()
eur_call.delta()
eur_call.vega()
me_1.add_constant('model', 'gbm')
me_2.add_constant('model', 'gbm')
put = dx.derivatives_position(
name='put',
quantity=2,
underlyings=['gbm_1'],
mar_env=me_opt,
otype='American single',
payoff_func='np.maximum(strike - instrument_values, 0)')
call = dx.derivatives_position(
name='call',
quantity=3,
underlyings=['gbm_2'],
mar_env=me_opt,
otype='European single',
payoff_func='np.maximum(maturity_value - strike, 0)')
risk_factors = {'gbm_1': me_1, 'gbm_2' : me_2}
correlations = [['gbm_1', 'gbm_2', -0.4]]
positions = {'put' : put, 'call' : call}
val_env = dx.market_environment('general', dt.datetime(2015, 1, 1))
val_env.add_constant('frequency', 'W')
val_env.add_constant('paths', 10000)
val_env.add_constant('starting_date', val_env.pricing_date)
val_env.add_constant('final_date', val_env.pricing_date)
val_env.add_curve('discount_curve', r)
port = dx.derivatives_portfolio(
name='portfolio', # name
positions=positions, # derivatives positions
val_env=val_env, # valuation environment
risk_factors=risk_factors, # relevant risk factors
correlations=correlations, parallel=True) # correlation between risk factors
port.get_values()
port.get_statistics()
deltas, benchvalue = port.get_port_risk(Greek='Delta')
dx.risk_report(deltas)
dx.risk_report(deltas.ix[:, :, 'value'] - benchvalue)
vegas, benchvalue = port.get_port_risk(Greek='Vega', step=0.05)
dx.risk_report(vegas)
dx.risk_report(vegas.ix[:, :, 'value'] - benchvalue)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Parsing example
Step2: 2. Determine different genres
Step3: 3. Create vector of genres for each movie and a dataframe
Step4: Observe the result in the dataframe
Step5: Visual example of the genres
Step6: 3.1 Determine most frequent genres
Step7: Display of the number of times a genre appears in the dataframe
Step8: 3.2 Determine genres that are most commonly associated with each other
Step9: Determining ranking of genre associations
Step10: Display of the ranking of the other genres with which each genre is most often associated
Step11: 3.3 Determine how many films are successful or unsuccessful depending on the genre
Step12: Don't forget that the number of successful movies is not equal to the sum of the success rate of the genres since movies often have multiple genres.
Step13: 4. Create a similarity graph between films depending on genre
Step14: 4.1 Normalization of the matrix
Step15: Maximum normalization
Step16: Plot the degree distribution
Step17: Gaussian normalization
Step18: 4.3 Save the dataset
Step19: 5. Graph Laplacian and Embedding for maximum normalization
Step20: Normally
Step21: 5.2 Compute the Fourier basis
Step22: Normally
Step23: 5.3 Graph embedding
Step24: 6. Graph Laplacian and Embedding for Gaussian normalization
Step25: 6.1. Save the sparsified dataset
Step26: 6.2. Laplacian and graph embedding
Step27: Other
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import configparser
import os
import requests
from tqdm import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import sparse, stats, spatial
import scipy.sparse.linalg
from sklearn import preprocessing, decomposition
import librosa
import IPython.display as ipd
import json
from imdb import IMDb
import tmdbsimple as tmdb
from pygsp import graphs, filters, plotting
plt.rcParams['figure.figsize'] = (17, 5)
plotting.BACKEND = 'matplotlib'
df = pd.read_csv('Saved_Datasets/NewFeaturesDataset.csv')
print('There are {} movies'.format(len(df)))
df['genres'][1]
df.head()
#df.iloc[100:150]
df['genres'] = df['genres'].str.replace('|', ',')
i = 1
newgenres = df['genres'][i].split(",")
print(newgenres)
print(len(newgenres))
Diffgenres = [];
genres = {}
movies_dic = {}
for i in range(0, len(df)):
movies_dic[i] = df['id'][i]
if df['genres'][i] == 'NaN':
newgenres = []
else:
newgenres = df['genres'][i].split(",")
genres.setdefault(i, [])
for j in range (0, len(newgenres)):
Diffgenres.append(newgenres[j])
genres[i].append(newgenres[j])
Diffgenres = set(Diffgenres)
Diffgenres = list(Diffgenres)
print('There are {} different genres'.format(len(Diffgenres)))
print(Diffgenres)
df.head()
print(genres[0][0])
len(genres[0][0])
vector = (genres[0][0] == np.array(Diffgenres)).astype(int)
print(vector)
genreArray = np.ndarray(shape=(len(df), len(Diffgenres)), dtype=int)
for i in range(0, len(df)):
vector = np.zeros(len(Diffgenres))
for j in range(0, len(genres[i])):
vector += (genres[i][j] == np.array(Diffgenres)).astype(int)
genreArray[i] = vector
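# genreArray is an (n_movies x n_genres) multi-hot indicator matrix: entry (i, j) is 1
# when movie i is tagged with genre j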
print(genreArray[0])
print(genreArray.size)
Genres = pd.DataFrame(genreArray, columns=Diffgenres)
Genres.head(10)
#Genres.iloc[120:150]
plt.spy(Genres[120:150])
freqGenre = np.ndarray(shape=(1, len(Diffgenres)), dtype=int)
for i in range(0, len(Diffgenres)):
freqGenre[0][i] = sum(Genres[Diffgenres[i]] == 1)
NbGenre = pd.DataFrame(freqGenre, columns=Diffgenres)
NbGenre
NbGenre.to_csv('Saved_Datasets/NbGenre.csv', index=False)
plt.bar(Diffgenres, freqGenre[0], align='center');
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right');
plt.xlabel('Genres');
plt.ylabel('Counts');
plt.savefig('images/GenreFreq.png', dpi =300, bbox_inches='tight')
assosGenre = np.ndarray(shape=(len(Diffgenres), len(Diffgenres)), dtype=int)
for i in range(0, len(Diffgenres)):
for j in range(0, len(Diffgenres)):
if i != j:
assosGenre[i][j] = sum((Genres[Diffgenres[i]] == 1) & (Genres[Diffgenres[j]] == 1))
else:
assosGenre[i][j] = 0
#ensure the matrix is symmetric
assosGenreSym = assosGenre.transpose() > assosGenre
assosGenre = assosGenre - assosGenre*assosGenreSym + assosGenre.transpose()*assosGenreSym
plt.spy(assosGenre)
NbGenreAssos = pd.DataFrame(assosGenre, columns=Diffgenres, index = Diffgenres)
NbGenreAssos
NbGenreAssos.to_csv('Saved_Datasets/NbGenreAssos.csv', index=False)
assosRank = {}
rank = np.argsort(-assosGenre, axis=1) #negative for ascending order
Diffgenres[rank[0][1]]
for i in range(0, len(Diffgenres)):
for j in range(0, len(Diffgenres)):
assosRank.setdefault(Diffgenres[i], [])
#Only if not comparing with the same genre
if Diffgenres[i] != Diffgenres[rank[i][j]]:
assosRank[Diffgenres[i]].append(Diffgenres[rank[i][j]])
ranking = np.linspace(1, len(Diffgenres)-1, num=len(Diffgenres)-1, endpoint=True, retstep=False, dtype=int)
Rankdf = pd.DataFrame(assosRank, index=ranking)
Rankdf
Rankdf.to_csv('Saved_Datasets/GenreRanking.csv', index=False)
genreArray[0]
genreSuccess = np.zeros(shape=(1, len(Diffgenres)), dtype=float)
genreSuccessPc = np.zeros(shape=(1, len(Diffgenres)), dtype=float)
for i in range(0, len(Diffgenres)):
for j in range(0, len(df)):
if genreArray[j][i] == 1:
if df['success'][j] == 1:
genreSuccess[0][i] += 1
genreSuccessPc[0][i] = (genreSuccess[0][i]/freqGenre[0][i])*100
plt.bar(Diffgenres, genreSuccessPc[0], align='center');
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right');
plt.xlabel('Genres');
plt.ylabel('Success rate [%]');
print(sum(sum(genreSuccess)))
print('The number of films that are succesful: {}'.format(len(df[df['success'] == 1])))
print('The number of films that are unsuccesful: {}'.format(len(df[df['success'] == 0])))
weights = np.ndarray(shape=(len(df), len(df)), dtype=int)
weights = genreArray @ genreArray.T
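# entry (i, j) of weights counts how many genres movies i and j have in common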
#fill the diagonal values to zero, i.e. no self-connections
np.fill_diagonal(weights, 0)
plt.spy(weights)
plt.hist(weights[weights > 0].reshape(-1), bins=50);
print('There are {} weights equal to zero'.format(np.sum(weights == 0)))
print('There are {} weights equal to one'.format(np.sum(weights == 1)))
print('There are {} weights equal to seven'.format(np.sum(weights == 7)))
meanW = weights.mean()
maxW = weights.max()
minW = weights.min()
print('The mean value of the similarity in terms of genre is: {}'.format(meanW))
print('The max value of the similarity is: {}'.format(maxW))
print('The min value of the similarity is: {}'.format(minW))
print(genreArray[1])
print(sum(genreArray[1]))
weightsNorm = np.ndarray(shape=(len(df), len(df)), dtype=float)
lengths = np.ndarray(shape=(1, 2), dtype=int)
lenMax = 0;
for i in range(0, len(weights)):
for j in range(0, len(weights)):
if i!=j:
lengths = [sum(genreArray[i]), sum(genreArray[j])]
weightsNorm[i][j] = (weights[i][j])/max(lengths)
np.fill_diagonal(weightsNorm, 0)
sigma = np.std(weights)
print(sigma)
mu = np.mean(weights)
print(mu)
#1/(sigma*math.sqrt(2*math.pi))*
Wgauss = np.exp(-((weights-mu)**2)/(2*sigma**2))
#fill the diagonal values to zero, i.e. no self-connections
np.fill_diagonal(Wgauss, 0)
plt.spy(weightsNorm)
plt.hist(weightsNorm.reshape(-1), bins=50);
print('The mean value is: {}'.format(weightsNorm.mean()))
print('The max value is: {}'.format(weightsNorm.max()))
print('The min value is: {}'.format(weightsNorm.min()))
degrees = np.zeros(len(weightsNorm))
#reminder: the degrees of a node for a weighted graph are the sum of its weights
for i in range(0, len(weightsNorm)):
degrees[i] = sum(weightsNorm[i])
plt.hist(degrees, bins=50);
print('The mean value is: {}'.format(degrees.mean()))
print('The max value is: {}'.format(degrees.max()))
print('The min value is: {}'.format(degrees.min()))
plt.spy(Wgauss)
NormW = pd.DataFrame(weightsNorm)
NormW.head()
NormW.to_csv('Saved_Datasets/NormalizedGenreW.csv', index=False)
G = graphs.Graph(weightsNorm)
G.compute_laplacian('normalized')
#reminder: L = D - W for weighted graphs
laplacian = np.diag(degrees) - weightsNorm
#computation of the normalized Laplacian
laplacian_norm = scipy.sparse.csgraph.laplacian(weightsNorm, normed = True)
plt.spy(laplacian_norm);
laplacian_norm = sparse.csr_matrix(laplacian_norm)
G.compute_fourier_basis(recompute=True)
plt.plot(G.e[0:10]);
eigenvalues, eigenvectors = sparse.linalg.eigsh(laplacian_norm, k = 10, which = 'SM')
plt.plot(eigenvalues, '.-', markersize=15);
plt.xlabel('')
plt.ylabel('Eigenvalues')
plt.show()
genres = preprocessing.LabelEncoder().fit_transform(df['success'])
x = eigenvectors[:, 1]
y = eigenvectors[:, 2]
plt.scatter(x, y, c=genres, cmap='RdBu', alpha=0.5);
G.set_coordinates(G.U[:, 1:3])
G.plot()
G.plot_signal(genres, vertex_size=20)
NEIGHBORS = 300
#sort the order of the weights
sort_order = np.argsort(Wgauss, axis = 1)
#declaration of a sorted weight matrix
sorted_weights = np.zeros((len(Wgauss), len(Wgauss)))
for i in range (0, len(Wgauss)):
for j in range(0, len(Wgauss)):
if (j >= len(Wgauss) - NEIGHBORS):
#copy the k strongest edges for each node
sorted_weights[i, sort_order[i,j]] = Wgauss[i,sort_order[i,j]]
else:
#set the other edges to zero
sorted_weights[i, sort_order[i,j]] = 0
#ensure the matrix is symmetric
bigger = sorted_weights.transpose() > sorted_weights
sorted_weights = sorted_weights - sorted_weights*bigger + sorted_weights.transpose()*bigger
plt.spy(sorted_weights)
plt.hist(sorted_weights.reshape(-1), bins=50);
NormW = pd.DataFrame(sorted_weights)
NormW.head()
NormW.to_csv('Saved_Datasets/NormalizedGenreWSparse.csv', index=False)
G = graphs.Graph(sorted_weights)
G.compute_laplacian('normalized')
#reminder: L = D - W for weighted graphs
laplacian = np.diag(degrees) - sorted_weights
#computation of the normalized Laplacian
laplacian_norm = scipy.sparse.csgraph.laplacian(sorted_weights, normed = True)
plt.spy(laplacian_norm);
G.compute_fourier_basis(recompute=True)
plt.plot(G.e[0:10]);
G.set_coordinates(G.U[:, 1:3])
G.plot()
G.plot_signal(genres, vertex_size=20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make the API call with our list of targets to find the associations. Set facets to true.
Step2: Print out all the json returned just for reference
Step3: The therapeutic area facets look interesting - let's iterate through these and display
Step4: Sort by target count and then disease count
Step5: Using the python tabulate library to render a pretty table of our extracted therapeutic areas.
Step6: Let's just consider the first 5 top therapeutic areas
Step7: Now for each of those identify the top 5 diseases. Unfortunately we don't get the disease names in the facets, just the codes. If this is the right approach, perhaps an API change is needed?
|
<ASSISTANT_TASK:>
Python Code:
# Assumed from earlier cells of the original notebook: the HTTP and table-printing helpers.
import requests
from tabulate import tabulate
targets = ['ENSG00000069696', 'ENSG00000144285']
targets_string = ', '.join('"{0}"'.format(t) for t in targets)
url = 'https://www.targetvalidation.org/api/latest/public/association/filter'
headers = {"Accept": "application/json"}
# There may be an easier way of building these parameters...
data = "{\"target\":[" + targets_string + "], \"facets\":true}"
response = requests.post(url, headers=headers, data=data)
output = response.json()
#print json.dumps(output, indent=2)
therapeuticareas = []
for bucket in output['facets']['therapeutic_area']['buckets']:
therapeuticareas.append({
'target_count' : bucket['unique_target_count']['value'],
'disease_count' : bucket['unique_disease_count']['value'],
'therapeutic_area' : bucket['label'],
'key' : bucket['key']
})
therapeuticareas = sorted(therapeuticareas, key=lambda k: (k['target_count'],k['disease_count']), reverse=True)
print tabulate(therapeuticareas, headers="keys", tablefmt="grid")
therapeuticareas = therapeuticareas[:5]
print tabulate(therapeuticareas, headers="keys", tablefmt="grid")
for therapeuticarea in therapeuticareas:
print "Therapeutic area: " + therapeuticarea['therapeutic_area']
data = "{\"target\":[" + targets_string + "], \"facets\":true, \"therapeutic_area\":[\"" + therapeuticarea['key'] + "\"]}"
response = requests.post(url, headers=headers, data=data)
output = response.json()
diseases = []
for bucket in output['facets']['disease']['buckets']:
diseases.append({
'target_count' : bucket['unique_target_count']['value'],
'doc_count' : bucket['doc_count'],
'key' : bucket['key']
})
# Sort and take top 5
diseases = sorted(diseases, key=lambda k: (k['target_count'],k['doc_count']), reverse=True)
diseases = diseases[:5]
print tabulate(diseases, headers="keys", tablefmt="grid")
print ""
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.sparse import csr_matrix
arr = np.random.rand(4, 4)
M = csr_matrix(arr)
result = M.A.diagonal(0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes ** -0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes ** -0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x: 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
# def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
# self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
hidden_error = np.dot(error, self.weights_hidden_to_output.T)
output_error_term = error
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:, None]
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:, None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records
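# the accumulated weight steps are averaged over the batch (division by n_records)
# before being scaled by the learning rate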
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
iterations = 1000000
learning_rate = 0.0025
hidden_nodes = 25
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train': [], 'validation': []}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: Import useful functions from previous notebooks
Step3: We will also need the normalize_features() function from Week 5 that normalizes all feature columns to unit norm. Paste this function below.
Step4: Extract features and normalize
Step5: In computing distances, it is crucial to normalize features. Otherwise, for example, the sqft_living feature (typically on the order of thousands) would exert a much larger influence on distance than the bedrooms feature (typically on the order of ones). We divide each column of the training feature matrix by its 2-norm, so that the transformed column has unit norm.
Step6: Compute a single distance
Step7: The subtraction operator (-) in Numpy is vectorized as follows
Step8: Note that the output of this vectorized operation is identical to that of the loop above, which can be verified below
Step9: Aside
Step10: The next step in computing the Euclidean distances is to take these feature-by-feature differences in diff, square each, and take the sum over feature indices. That is, compute the sum of square feature differences for each training house (row in diff).
Step11: With this result in mind, write a single-line expression to compute the Euclidean distances between the query house and all houses in the training set. Assign the result to a variable distances.
Step12: Now you are ready to write a function that computes the distances from a query house to all training houses. The function should take two parameters
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('kc_house_data_small.gl/')
import numpy as np # note this allows us to refer to numpy as np instead
(train_and_validation, test) = sales.random_split(.8, seed=1) # initial train/test split
(train, validation) = train_and_validation.random_split(.8, seed=1) # split training set into training and validation sets
feature_list = ['bedrooms',
'bathrooms',
'sqft_living',
'sqft_lot',
'floors',
'waterfront',
'view',
'condition',
'grade',
'sqft_above',
'sqft_basement',
'yr_built',
'yr_renovated',
'lat',
'long',
'sqft_living15',
'sqft_lot15']
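# A hedged sketch of the helper functions referenced below: Steps 2-4 of the
# description rely on get_numpy_data() and normalize_features() from earlier
# notebooks, whose definitions are omitted from this excerpt. These versions
# are reconstructed from how they are used and may differ in detail from the
# originals.
def get_numpy_data(data_sframe, features, output):
    data_sframe['constant'] = 1              # constant column for the intercept
    features = ['constant'] + features
    feature_matrix = data_sframe[features].to_numpy()
    output_array = data_sframe[output].to_numpy()
    return (feature_matrix, output_array)

def normalize_features(feature_matrix):
    norms = np.linalg.norm(feature_matrix, axis=0)   # 2-norm of each column
    return (feature_matrix / norms, norms)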
features_train, output_train = get_numpy_data(train, feature_list, 'price')
features_test, output_test = get_numpy_data(test, feature_list, 'price')
features_valid, output_valid = get_numpy_data(validation, feature_list, 'price')
features_train, norms = normalize_features(features_train) # normalize training set features (columns)
features_test = features_test / norms # normalize test set by training set norms
features_valid = features_valid / norms # normalize validation set by training set norms
for i in xrange(3):
print features_train[i]-features_test[0]
# should print 3 vectors of length 18
print features_train[0:3] - features_test[0]
# verify that vectorization works
results = features_train[0:3] - features_test[0]
print results[0] - (features_train[0]-features_test[0])
# should print all 0's if results[0] == (features_train[0]-features_test[0])
print results[1] - (features_train[1]-features_test[0])
# should print all 0's if results[1] == (features_train[1]-features_test[0])
print results[2] - (features_train[2]-features_test[0])
# should print all 0's if results[2] == (features_train[2]-features_test[0])
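# `diff` is used below but not defined in this excerpt. Following Step 10 of
# the description, it holds the feature-by-feature differences between the
# query house and every training house; using features_test[0] as the query
# is an assumption consistent with the preceding cells (the reference values
# quoted in the comments below may correspond to a different query house).
diff = features_train - features_test[0]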
print diff[-1].sum() # sum of the feature differences between the query and last training house
# should print -0.0934339605842
print np.sum(diff**2, axis=1)[15] # take sum of squares across each row, and print the 16th sum
print np.sum(diff[15]**2) # print the sum of squares for the 16th row -- should be same as above
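# `distances` is used below but not defined in this excerpt. Per Steps 11-12,
# it is the vector of Euclidean distances between the query house and all
# training houses.
distances = np.sqrt(np.sum(diff**2, axis=1))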
print distances[100] # Euclidean distance between the query house and the 101th training house
# should print 0.0237082324496
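# A hedged sketch for Step 12: a function computing the distances from a
# single query house to all houses in a feature matrix. The name and exact
# signature are assumptions, since the original notebook omits it here.
def compute_distances(features_instances, features_query):
    diffs = features_instances - features_query
    return np.sqrt(np.sum(diffs**2, axis=1))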
import matplotlib.pyplot as plt
%matplotlib inline
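# `rss_all` below is assumed to hold the validation RSS of k-nearest-neighbour
# predictions for k = 1..15; the loop that computes it is omitted from this excerpt.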
kvals = range(1, 16)
plt.plot(kvals, rss_all,'bo-')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Grids
Step2: Data is stored as an ArrayIndexer object, which makes it easy to implement differencing on the entire array.
Step3: Running
Step4: Example
Step5: Example
|
<ASSISTANT_TASK:>
Python Code:
import mesh.patch as patch
import mesh.boundary as bnd
import numpy as np
g = patch.Grid2d(16, 16, ng=2)
print(g)
bc = bnd.BC(xlb="periodic", xrb="periodic", ylb="reflect", yrb="outflow")
print(bc)
d = patch.CellCenterData2d(g)
d.register_var("a", bc)
d.create()
print(d)
a = d.get_var("a")
b = g.scratch_array()
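# second-order centred difference of a in the x-direction; ip(+1) and ip(-1)
# are index-shifted views provided by the ArrayIndexer object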
b.v()[:,:] = (a.ip(1) - a.ip(-1))/(2.0*a.g.dx)
from pyro import Pyro
pyro_sim = Pyro("advection")
pyro_sim.initialize_problem("tophat", "inputs.tophat",
other_commands=["mesh.nx=8", "mesh.ny=8",
"vis.dovis=0"])
pyro_sim.run_sim()
dens = pyro_sim.get_var("density")
dens.pretty_print(show_ghost=True, fmt="%6.2f")
pyro_sim.sim.dovis()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Calculate the water-water radial distribution function. In statistical mechanics, the radial distribution function (or pair correlation function), g(r), of a system of particles (atoms, molecules, colloids, etc.) describes how density varies as a function of distance from a reference particle.
Step2: Plot the g(r) versus r for O-O and O-H
Step3: Calculate molecule - water rdf
Step4: Plot the g(r) versus r for Ow-x
Step5: Plot the g(r) versus r for Hw-x
Step6: Print the distance between dop (residue 0) and residue 1 (H2O)
Step7: Calculate a dihedral angle as a function of time
Step8: Plot the results
|
<ASSISTANT_TASK:>
Python Code:
# imports implied by the pt. and plt. calls below
import pytraj as pt
import matplotlib.pyplot as plt
%matplotlib inline
traj = pt.Trajectory('step5_production.dcd', '../step3_pbcsetup.xplor.ext.psf')
print(traj)
goo = pt.rdf(traj, solvent_mask=':TIP3@OH2', solute_mask=':TIP3@OH2', bin_spacing=0.05, maximum=8.)
goh1 = pt.rdf(traj, solvent_mask=':TIP3@OH2', solute_mask=':TIP3@H1', bin_spacing=0.05, maximum=8.)
goh2 = pt.rdf(traj, solvent_mask=':TIP3@OH2', solute_mask=':TIP3@H2', bin_spacing=0.05, maximum=8.)
plt.plot(goo[0],goo[1])
plt.plot(goo[0],goh1[1])
plt.plot((0, 8), (1, 1), 'k--')
plt.xlim([1.5,8])
plt.ylim([0,3])
plt.xlabel('$r_{ox} / \AA$', fontsize=14)
plt.ylabel('$g(r_{ox})$', fontsize=14)
plt.show()
gon = pt.rdf(traj, solvent_mask=':TIP3@OH2', solute_mask=':DOP@N23', bin_spacing=0.05, maximum=8.)
goh231 = pt.rdf(traj, solvent_mask=':TIP3@OH2', solute_mask=':DOP@H231', bin_spacing=0.05, maximum=8.)
goo1 = pt.rdf(traj, solvent_mask=':TIP3@OH2', solute_mask=':DOP@O1', bin_spacing=0.05, maximum=8.)
goh4 = pt.rdf(traj, solvent_mask=':TIP3@OH2', solute_mask=':DOP@H4', bin_spacing=0.05, maximum=8.)
ghwn23 = pt.rdf(traj, solvent_mask=':TIP3@H1', solute_mask=':DOP@N23', bin_spacing=0.05, maximum=8.)
ghwh231 = pt.rdf(traj, solvent_mask=':TIP3@H1', solute_mask=':DOP@H231', bin_spacing=0.05, maximum=8.)
ghwo1 = pt.rdf(traj, solvent_mask=':TIP3@H1', solute_mask=':DOP@O1', bin_spacing=0.05, maximum=8.)
ghwh4 = pt.rdf(traj, solvent_mask=':TIP3@H1', solute_mask=':DOP@H4', bin_spacing=0.05, maximum=8.)
plt.plot(gon[0],gon[1],label='OW...NH$_{3}$')
plt.plot(goh231[0],goh231[1],label='OW...HN')
plt.plot(goo1[0],goo1[1],label='OW...O1')
plt.plot(goh4[0],goh4[1],label='OW...HO1')
plt.plot((0, 8), (1, 1), 'k--')
plt.xlim([1.5,8])
plt.ylim([0,0.0015])
plt.xlabel('$r_{ox} / \AA$', fontsize=14)
plt.ylabel('$g(r_{ox})$', fontsize=14)
plt.legend(loc='upper right',frameon=False, bbox_to_anchor=(1.35, 0.95),numpoints=1)
plt.show()
plt.plot(ghwn23[0],ghwn23[1],label='HW...NH$_{3}$')
plt.plot(ghwh231[0],ghwh231[1],label='HW...HN')
plt.plot(ghwo1[0],ghwo1[1],label='HW...O1')
plt.plot(ghwh4[0],ghwh4[1],label='HW...HO1')
plt.plot((0, 8), (1, 1), 'k--')
plt.xlim([1.5,8])
plt.ylim([0,0.0008])
plt.xlabel('$r_{hwx} / \AA$', fontsize=14)
plt.ylabel('$g(r_{hwx})$', fontsize=14)
plt.legend(loc='upper right',frameon=False, bbox_to_anchor=(1.35, 0.95),numpoints=1)
plt.show()
data = pt.calc_pairdist(traj, mask=':TIP3@OH2', mask2=':DOP@N23')
print(data[:,0][:1])
atom_indices = pt.select(':DOP@N23', traj.top)
print(atom_indices)
dataset = pt.dihedral(traj, ':DOP@N23 :DOP@C2 :DOP@C3 :DOP@C1')
time = [i*0.002 for i in range(len(dataset))]
plt.plot(time,dataset, 'ro', label='$\phi$')
plt.xlabel('Time / ns', fontsize=14)
plt.ylabel('Angle / $\circ$', fontsize=14)
plt.legend(loc='upper right',frameon=False, bbox_to_anchor=(1.2, 0.95),numpoints=1)
plt.show()
hist0 = plt.hist(dataset,25)
tmp = plt.setp(hist0[2], 'facecolor', 'r', 'alpha', 0.75)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: and set up the simulation parameters where we introduce a new dimensionless parameter
Step2: Now we set up the system. As in part I, the orientation of the dipole moments is set directly on the particles, whereas the magnitude of the moments is taken into account when determining the prefactor of the dipolar P3M (for more details see part I).
Step3: We now apply the external magnetic field which is
Step4: Exercise
Step5: Now we can visualize the current state and see that the particles mostly create chains oriented in the direction of the external magnetic field. Also some monomers should be present.
Step7: Video of the development of the system
Step8: We can now start the sampling using the <tt>animation</tt> class of <tt>matplotlib</tt>
Step9: In the visualization video we can see that single chains break and reconnect to each other over time. Also some monomers are present, which detach from and attach to chains. If you want more frames, i.e. a longer video, just adjust the <tt>frames</tt> parameter in <tt>FuncAnimation</tt>.
Step10: To increase the performance we use the built-in function <tt>MagneticDipoleMoment</tt> to calculate the dipole moment of the whole system. In our case this is only the orientation as we never set the strength of the dipole moments on our particles.
Step11: For both the magnetization perpendicular and parallel to the monolayer plane we use the same system for every value of the Langevin parameter $\alpha$. Thus we exploit the fact that the system is already more or less equilibrated from the previous run, which saves some equilibration time. For scientific purposes one would use a new system for every value of the Langevin parameter to ensure that the systems are independent and no correlation effects are measured. One would also perform more than just one simulation for each value of $\alpha$ to increase the precision of the results.
Step12: For the approximations of $M_{\parallel}^{\text{q2D}}$ and $M_{\perp}^{\text{q2D}}$ of Ref. <a href='#[1]'>[1]</a> we need the dipole moment of a single particle. Thus we calculate it from our dipolar interaction parameter $\lambda$
Step13: and the saturation magnetization by using
Step14: Further, we need the derivative of the Langevin function with respect to the external field $B$, thus we define the function
Step15: Now we define the approximated magnetization curves parallel and perpendicular to the monolayer plane
Step16: Now we define the Langevin function
|
<ASSISTANT_TASK:>
Python Code:
import espressomd
import espressomd.magnetostatics
import espressomd.magnetostatic_extensions
espressomd.assert_features('DIPOLES', 'LENNARD_JONES')
import numpy as np
# Lennard-Jones parameters
LJ_SIGMA = 1.
LJ_EPSILON = 1.
LJ_CUT = 2**(1. / 6.) * LJ_SIGMA
# Particles
N_PART = 700
# Area fraction of the mono-layer
PHI = 0.06
# Dipolar interaction parameter lambda = MU_0 m^2 /(4 pi sigma^3 kT)
DIP_LAMBDA = 4.
# Temperature
KT = 1.0
# Friction coefficient
GAMMA = 1.0
# Time step
TIME_STEP = 0.01
# Langevin parameter ALPHA = MU_0 m H / kT
ALPHA = 10.
# vacuum permeability
MU_0 = 1.
# System setup
box_size = (N_PART * np.pi * (LJ_SIGMA / 2.)**2. / PHI)**0.5
print("Box size", box_size)
# Note that the dipolar P3M and dipolar layer correction need a cubic
# simulation box for technical reasons.
system = espressomd.System(box_l=(box_size, box_size, box_size))
system.time_step = TIME_STEP
# Lennard-Jones interaction
system.non_bonded_inter[0, 0].lennard_jones.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA, cutoff=LJ_CUT, shift="auto")
# Random dipole moments
np.random.seed(seed=1)
dip_phi = 2. * np.pi * np.random.random((N_PART, 1))
dip_cos_theta = 2 * np.random.random((N_PART, 1)) - 1
dip_sin_theta = np.sin(np.arccos(dip_cos_theta))
dip = np.hstack((
dip_sin_theta * np.sin(dip_phi),
dip_sin_theta * np.cos(dip_phi),
dip_cos_theta))
# Random positions in the monolayer
pos = box_size * np.hstack((np.random.random((N_PART, 2)), np.zeros((N_PART, 1))))
# Add particles
particles = system.part.add(pos=pos, rotation=N_PART * [(True, True, True)], dip=dip, fix=N_PART * [(False, False, True)])
# Remove overlap between particles by means of the steepest descent method
system.integrator.set_steepest_descent(
f_max=0, gamma=0.1, max_displacement=0.05)
while system.analysis.energy()["total"] > 5 * KT * N_PART:
system.integrator.run(20)
# Switch to velocity Verlet integrator
system.integrator.set_vv()
system.thermostat.set_langevin(kT=KT, gamma=GAMMA, seed=1)
# tune verlet list skin
system.cell_system.tune_skin(min_skin=0.4, max_skin=2., tol=0.2, int_steps=100)
# Setup dipolar P3M and dipolar layer correction (DLC)
dp3m = espressomd.magnetostatics.DipolarP3M(accuracy=5E-4, prefactor=DIP_LAMBDA * LJ_SIGMA**3 * KT)
dlc = espressomd.magnetostatic_extensions.DLC(maxPWerror=1E-4, gap_size=box_size - LJ_SIGMA)
system.actors.add(dp3m)
system.actors.add(dlc)
# tune verlet list skin again
system.cell_system.tune_skin(min_skin=0.4, max_skin=2., tol=0.2, int_steps=100)
# print skin value
print('tuned skin = {}'.format(system.cell_system.skin))
# magnetic field times dipole moment
H_dipm = ALPHA * KT
H_field = [H_dipm, 0, 0]
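# A hedged sketch for the "Exercise" step above: one plausible way to apply the
# homogeneous external field H_field to the system. The constraint class and
# argument names are assumed from the espressomd.constraints module.
import espressomd.constraints
H_constraint = espressomd.constraints.HomogeneousMagneticField(H=H_field)
system.constraints.add(H_constraint)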
# Equilibrate
print("Equilibration...")
equil_rounds = 10
equil_steps = 200
for i in range(equil_rounds):
system.integrator.run(equil_steps)
print("progress: {:3.0f}%, dipolar energy: {:9.2f}".format(
(i + 1) * 100. / equil_rounds, system.analysis.energy()["dipolar"]), end="\r")
print("\nEquilibration done")
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
plt.xlim(0, box_size)
plt.ylim(0, box_size)
plt.xlabel('x-position', fontsize=20)
plt.ylabel('y-position', fontsize=20)
plt.plot(particles.pos_folded[:, 0], particles.pos_folded[:, 1], 'o')
plt.show()
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import tempfile
import base64
VIDEO_TAG = """<video controls>
<source src="data:video/x-m4v;base64,{0}" type="video/mp4">
Your browser does not support the video tag.
</video>"""
def anim_to_html(anim):
if not hasattr(anim, '_encoded_video'):
with tempfile.NamedTemporaryFile(suffix='.mp4') as f:
anim.save(f.name, fps=20, extra_args=['-vcodec', 'libx264'])
with open(f.name, "rb") as g:
video = g.read()
anim._encoded_video = base64.b64encode(video).decode('ascii')
plt.close(anim._fig)
return VIDEO_TAG.format(anim._encoded_video)
animation.Animation._repr_html_ = anim_to_html
def init():
# Set x and y range
ax.set_ylim(0, box_size)
ax.set_xlim(0, box_size)
xdata, ydata = [], []
part.set_data(xdata, ydata)
return part,
def run(i):
system.integrator.run(50)
# Save current system state as a plot
xdata, ydata = particles.pos_folded[:, 0], particles.pos_folded[:, 1]
ax.figure.canvas.draw()
part.set_data(xdata, ydata)
print("progress: {:3.0f}%".format(i + 1), end="\r")
return part,
fig, ax = plt.subplots(figsize=(10, 10))
part, = ax.plot([], [], 'o')
animation.FuncAnimation(fig, run, frames=100, blit=True, interval=0, repeat=False, init_func=init)
# Dipolar interaction parameter lambda = MU_0 m^2 /(4 pi sigma^3 kT)
DIP_LAMBDA = 1.
# increase time step
TIME_STEP = 0.02
# dipole moment
dipm = np.sqrt(4 * np.pi * DIP_LAMBDA * LJ_SIGMA**3. * KT / MU_0)
# remove all particles
system.part.clear()
system.thermostat.turn_off()
# Random dipole moments
dip_phi = 2. * np.pi * np.random.random((N_PART, 1))
dip_cos_theta = 2 * np.random.random((N_PART, 1)) - 1
dip_sin_theta = np.sin(np.arccos(dip_cos_theta))
dip = np.hstack((
dip_sin_theta * np.sin(dip_phi),
dip_sin_theta * np.cos(dip_phi),
dip_cos_theta))
# Random positions in the monolayer
pos = box_size * np.hstack((np.random.random((N_PART, 2)), np.zeros((N_PART, 1))))
# Add particles
particles = system.part.add(pos=pos, rotation=N_PART * [(True, True, True)], dip=dip, fix=N_PART * [(False, False, True)])
# Remove overlap between particles by means of the steepest descent method
system.integrator.set_steepest_descent(f_max=0, gamma=0.1, max_displacement=0.05)
while system.analysis.energy()["total"] > 5 * KT * N_PART:
system.integrator.run(20)
# Switch to velocity Verlet integrator
system.integrator.set_vv()
system.thermostat.set_langevin(kT=KT, gamma=GAMMA, seed=1)
# tune verlet list skin
system.cell_system.tune_skin(min_skin=0.4, max_skin=2., tol=0.2, int_steps=100)
# Setup dipolar P3M and dipolar layer correction
system.actors.remove(dp3m)
system.actors.remove(dlc)
dp3m = espressomd.magnetostatics.DipolarP3M(accuracy=5E-4, prefactor=DIP_LAMBDA * LJ_SIGMA**3 * KT)
dlc = espressomd.magnetostatic_extensions.DLC(maxPWerror=1E-4, gap_size=box_size - LJ_SIGMA)
system.actors.add(dp3m)
system.actors.add(dlc)
# tune verlet list skin again
system.cell_system.tune_skin(min_skin=0.4, max_skin=2., tol=0.2, int_steps=100)
alphas = np.array([0, 0.25, 0.5, 1, 2, 3, 4, 8])
import matplotlib.pyplot as plt
# dipole moment
dipm = np.sqrt(DIP_LAMBDA * 4 * np.pi * LJ_SIGMA**3. * KT / MU_0)
print('dipole moment = {}'.format(dipm))
M_sat = PHI * 4. / np.pi * 1. / (LJ_SIGMA**2.) * dipm
def dL_dB(alpha):
return (1. / (alpha**2.) - 1. / ((np.sinh(alpha))**2.)) * dipm / (KT)
# approximated magnetization curve for a field parallel to the monolayer plane
def magnetization_approx_para(alpha):
return L(alpha) * (1. + MU_0 / 8. * M_sat * dL_dB(alpha))
# approximated magnetization curve for a field perpendicular to the monolayer plane
def magnetization_approx_perp(alpha):
return L(alpha) * (1. - MU_0 / 4. * M_sat * dL_dB(alpha))
# Langevin function
def L(x):
return (np.cosh(x) / np.sinh(x)) - 1. / x
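# A hedged sketch of the sampling loop over the Langevin parameters `alphas`,
# which is omitted from this excerpt. The field-constraint API, the loop
# lengths and the use of the particles' unit dipole vectors are assumptions;
# any field constraint added earlier should be removed before this sweep.
import espressomd.constraints
magnetization_para = []
for alpha in alphas[1:]:                                  # skip alpha = 0 (no field)
    field = espressomd.constraints.HomogeneousMagneticField(H=[alpha * KT, 0, 0])
    system.constraints.add(field)
    system.integrator.run(2000)                           # equilibrate at this field strength
    samples = []
    for _ in range(100):
        system.integrator.run(100)
        # dipole vectors have unit length, so this is M_x / M_sat
        samples.append(np.sum(particles.dip, axis=0)[0] / N_PART)
    magnetization_para.append(np.mean(samples))
    system.constraints.remove(field)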
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: this is usually called "least-squares" fitting
Step2: Overfitting
Step3: General rule of thumb
Step4: Weighted Least Squares
Step5: Oops what happened? Well, the model value at x=0 is 0 in this case, and the errors are too, so our 1/errors_poisson statement becomes problematic because we can't divide by zero.
Step6: Similarly, we can build in the uncertainties/weights when we do the least squares fit to the data. As before, the function will minimize the least squares sum to find the best fit, but this time the version with the weights.
Step7: Chi Squared Test for Goodness of Fit
Step8: Why did this break? Well, again because the model has a value of zero at one point, and dividing by zero results in infinity. This is a very real problem with the chi-squared statistic, but we can dance our way around it by removing the datapoint at x=0 from consideration.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
#generate some random numbers with values between -0.5 and 0.5, which we'll call "noise"
noise = (np.random.rand(11)-0.5)
noise
#plot simple relationship y=2x with this noise added
x = np.arange(11)
plt.plot(x,2*x+noise, 'bo')
plt.plot(x,2*x,'r--')
plt.xlim(0,10)
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("tight scatter")
#make noisier noise (between -5 and 5)
noise2 = (np.random.rand(11)-0.5)*10
noise2
noise = (np.random.rand(11)-0.5)*10
plt.plot(x,2*x+noise, 'go')
plt.plot(x,2*x)
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("large scatter")
#compare model and "actual data" (that I made up in this case)
model = 2*x
data = 2*x+noise
errors = model-data
errors
#squaring and square rooting gives us positive distances
errors_pos = np.sqrt((model-data)**2)
errors_pos
#then add them all up to get a total measure of the difference
total_error = sum(errors_pos)
total_error
#now, let's assume that I have only the data and no model
plt.plot(x,2*x+noise, 'go')
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("no fit")
#now this sum of squares metric might allow me to judge the quality of one model relative to another. For example:
plt.plot(x,2*x+noise, 'go')
plt.plot(x,2*x)
plt.plot(x,2.3*x-2,'r--')
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("potential fits")
#they both look like reasonable matches to the data, so how do I know which one matches better?
model1 = 2*x
model2 = 2.3*x - 2
error1 = sum(np.sqrt((model1-data)**2))
error2 = sum(np.sqrt((model2-data)**2))
print(error1, error2)
#But I can also devise a model other than the one that I used to define the synthetic data that matches better
#than the actual one I used, for example:
model1 = 2*x
model2 = 1.99*x
error1 = sum(np.sqrt((model1-data)**2))
error2 = sum(np.sqrt((model2-data)**2))
print(error1, error2)
#python has lots of built-in functionalities for this kind of thing, but we'll use the scipy optimize curve_fit function
import scipy.optimize as optimization
#to use it, you have to define a functional form for the fit line BUT NOT THE SPECIFIC VALUES
#for linear (straight line) fits this could take two forms
#line without an intercept (intercept zero)
def slopefunc(x,sl):
return sl*x
#line with an intercept
def slopeintfunc(x,sl,incpt):
return sl*x+incpt
#we could continue this to functions of arbitraty order
#for example, quadratic:
def quadfunc(x,a,b,c):
return a+b*x+c*x*x
#then use curve_fit
fit = optimization.curve_fit(slopeintfunc,x,data)
#the zeroth element then contains the optimal parameters for the functional parameters (in this case sl, incpt)
fit[0]
#and the next element contains what's called the covariance matrix
fit[1]
#let's plot it over the data now
plt.plot(x,2*x+noise, 'go')
plt.plot(x, slopeintfunc(x,fit[0][0],fit[0][1]))
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("least squares fit")
def tenparamfunc(x,a,b,c,d,e,f,g,h,i,j):
return a+b*x+c*x**2+d*x**3+e*x**4+f*x**5+g*x**6+h*x**7+i*x**8+j*x**9
fit2 = optimization.curve_fit(tenparamfunc,x,data)
fit2[0]
plt.plot(x,2*x+noise, 'go')
c = fit2[0]
plt.plot(x, tenparamfunc(x,c[0],c[1],c[2],c[3],c[4],c[5],c[6],c[7],c[8],c[9]))
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("fit for function with ten parameters")
# equal errors (homoschedastic)
errors_uniform = np.ones(11)
#errors that vary (heteroschedastic)
errors_poisson = np.sqrt(data)
#visualize this
plt.errorbar(x,data,yerr=errors_uniform, fmt='go')
plt.xlim(0,10)
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("homoschedastic error bars")
plt.errorbar(x,data,yerr=errors_uniform, fmt='go')
plt.xlim(0,10)
plt.plot(x, slopeintfunc(x,fit[0][0],fit[0][1]))
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("underestimated error bars (or bad model)")
plt.errorbar(x,data,yerr=errors_uniform*2.5, fmt='go')
plt.xlim(0,10)
plt.plot(x, slopeintfunc(x,fit[0][0],fit[0][1]))
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("error bars consistent with model")
plt.errorbar(x,data,yerr=errors_poisson, fmt='go')
plt.xlim(-1,11)
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("heteroschedastic error bars")
lsq_weighted=sum(1/errors_poisson**2*(data-model)**2)
lsq_weighted
x3=np.arange(10)+1
model3=2*x3
noise3 = (np.random.rand(10)-0.5)*10
data3= 2*x3+noise3
errors_poisson3 = np.sqrt(data3)
lsq_weighted=sum(1/errors_poisson3**2*(data3-model3)**2)
lsq_weighted
fit_weighted = optimization.curve_fit(slopeintfunc,x3,data3, sigma=errors_poisson3)
fit_unweighted = optimization.curve_fit(slopeintfunc,x3,data3)
plt.errorbar(x3,data3,yerr=errors_poisson3, fmt='go')
plt.xlim(0,11)
plt.ylim(0,25)
plt.plot(x3, slopeintfunc(x3,fit_weighted[0][0],fit_weighted[0][1]), label='weighted')
plt.plot(x3, slopeintfunc(x3,fit_unweighted[0][0],fit_unweighted[0][1]), 'r--', label='unweighted')
plt.legend(loc='lower right',)
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.title("weighted vs. unweighted fits")
chisq1 = sum((model1-data)**2/model1)
chisq1
x3=np.arange(10)+1
model3=2*x3
noise3 = (np.random.rand(10)-0.5)*10
data3= 2*x3+noise3
chisq1 = sum((model3-data3)**2/model3)
chisq1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Given an ordered binary tree $t$, the expression $t.\texttt{isEmpty}()$ checks whether $t$ is the empty tree.
Step2: Given an ordered binary tree $t$ and a key $k$, the expression $t.\texttt{member}(k)$ returns True if the key $k$ is stored in the tree $t$.
Step3: The method $\texttt{insert}()$ is specified via recursive equations.
Step4: The method $\texttt{self}.\texttt{delete}(k)$ removes the key $k$ from the tree $\texttt{self}$. It is defined as follows
Step5: The method $\texttt{self}.\texttt{delMin}()$ removes the smallest key from the given tree $\texttt{self}$
Step6: Given two ordered binary trees $s$ and $t$, the expression $s.\texttt{update}(t)$ overwrites the attributes of $s$ with the corresponding attributes of $t$.
Step7: The function $\texttt{restore}(\texttt{self})$ restores the balancing condition of the given binary tree
Step8: The function $\texttt{self}.\texttt{_setValues}(k, l, r)$ overwrites the member variables of the node $\texttt{self}$ with the given values.
Step9: The function $\texttt{createNode}(k, l, r)$ creates an AVL-tree of that has the key $k$ stored at its root,
Step10: The method $t.\texttt{pop}()$ take an AVL tree $t$ and removes and returns the smallest key that is present in $t$. It is specified as follows
Step11: Display Code
Step12: Given an ordered binary tree, this function renders the tree graphically using graphviz.
Step13: This method assigns a unique identifier with each node. The dictionary NodeDict maps these identifiers to the nodes where they occur.
Step14: This method counts all nodes in the tree.
Step15: Testing
Step16: Let's generate an ordered binary tree with random keys.
Step17: This tree looks more or less balanced. Let us try to create a tree by inserting sorted numbers, because that resulted in linear complexity for ordered binary trees.
Step18: Next, we compute the set of prime numbers $\leq 100$. Mathematically, this set is given as follows
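Presumably the intended formula (an assumption, reconstructed from the code below) is $\{2,\dots,100\} \setminus \{\, i \cdot j \mid i, j \in \{2,\dots,100\} \,\}$, i.e. the numbers from 2 to 100 that remain after removing all products of two such numbers.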
|
<ASSISTANT_TASK:>
Python Code:
class Set:
def __init__(self):
self.mKey = None
self.mLeft = None
self.mRight = None
self.mHeight = 0
def isEmpty(self):
return self.mKey is None
Set.isEmpty = isEmpty
Set.__bool__ = isEmpty
def __bool__(self):
return self.mKey is not None
Set.__bool__ = __bool__
def member(self, key):
if self.isEmpty():
return
elif self.mKey == key:
return True
elif key < self.mKey:
return self.mLeft.member(key)
else:
return self.mRight.member(key)
Set.member = member
Set.__contains__ = member
def insert(self, key):
if self.isEmpty():
self.mKey = key
self.mLeft = Set()
self.mRight = Set()
self.mHeight = 1
elif self.mKey == key:
pass
elif key < self.mKey:
self.mLeft.insert(key)
self._restore()
else:
self.mRight.insert(key)
self._restore()
Set.insert = insert
def delete(self, key):
if self.isEmpty():
return
if key == self.mKey:
if self.mLeft.isEmpty():
self._update(self.mRight)
elif self.mRight.isEmpty():
self._update(self.mLeft)
else:
self.mRight, self.mKey = self.mRight._delMin()
elif key < self.mKey:
self.mLeft.delete(key)
else:
self.mRight.delete(key)
Set.delete = delete
def _delMin(self):
if self.mLeft.isEmpty():
return self.mRight, self.mKey
else:
ls, km = self.mLeft._delMin()
self.mLeft = ls
self._restore()
return self, km
Set._delMin = _delMin
def _update(self, t):
self.mKey = t.mKey
self.mLeft = t.mLeft
self.mRight = t.mRight
self.mHeight = t.mHeight
Set._update = _update
def _restore(self):
# rebalance this node if the AVL height invariant is violated
if abs(self.mLeft.mHeight - self.mRight.mHeight) <= 1:
self._restoreHeight()
return
if self.mLeft.mHeight > self.mRight.mHeight:
k1, l1, r1 = self.mKey, self.mLeft, self.mRight
k2, l2, r2 = l1.mKey, l1.mLeft, l1.mRight
if l2.mHeight >= r2.mHeight:
# left-left case: single rotation to the right
self._setValues(k2, l2, createNode(k1, r2, r1))
else:
# left-right case: double rotation
k3, l3, r3 = r2.mKey, r2.mLeft, r2.mRight
self._setValues(k3, createNode(k2, l2, l3),
createNode(k1, r3, r1))
elif self.mRight.mHeight > self.mLeft.mHeight:
k1, l1, r1 = self.mKey, self.mLeft, self.mRight
k2, l2, r2 = r1.mKey, r1.mLeft, r1.mRight
if r2.mHeight >= l2.mHeight:
# right-right case: single rotation to the left
self._setValues(k2, createNode(k1, l1, l2), r2)
else:
# right-left case: double rotation
k3, l3, r3 = l2.mKey, l2.mLeft, l2.mRight
self._setValues(k3, createNode(k1, l1, l3),
createNode(k2, r3, r2))
self._restoreHeight()
Set._restore = _restore
def _setValues(self, k, l, r):
self.mKey = k
self.mLeft = l
self.mRight = r
Set._setValues = _setValues
def _restoreHeight(self):
self.mHeight = max(self.mLeft.mHeight, self.mRight.mHeight) + 1
Set._restoreHeight = _restoreHeight
def createNode(key, left, right):
node = Set()
node.mKey = key
node.mLeft = left
node.mRight = right
node.mHeight = max(left.mHeight, right.mHeight) + 1
return node
def pop(self):
if self.mKey == None:
raise KeyError
if self.mLeft.mKey == None:
key = self.mKey
self._update(self.mRight)
return key
return self.mLeft.pop()
Set.pop = pop
import graphviz as gv
def toDot(self):
Set.sNodeCount = 0 # this is a static variable of the class Set
dot = gv.Digraph(node_attr={'shape': 'record', 'style': 'rounded'})
NodeDict = {}
self._assignIDs(NodeDict)
for n, t in NodeDict.items():
if t.mKey != None:
dot.node(str(n), label=str(t.mKey))
else:
dot.node(str(n), label='', shape='point')
for n, t in NodeDict.items():
if not t.mLeft == None:
dot.edge(str(n), str(t.mLeft.mID))
if not t.mRight == None:
dot.edge(str(n), str(t.mRight.mID))
return dot
Set.toDot = toDot
def _assignIDs(self, NodeDict):
Set.sNodeCount += 1
self.mID = Set.sNodeCount
NodeDict[self.mID] = self
if self.isEmpty():
return
self.mLeft ._assignIDs(NodeDict)
self.mRight._assignIDs(NodeDict)
Set._assignIDs = _assignIDs
def __len__(self):
if self.isEmpty():
return 0
return 1 + len(self.mLeft) + len(self.mRight)
Set.__len__ = __len__
def demo():
m = Set()
m.insert("anton")
m.insert("hugo")
m.insert("gustav")
m.insert("jens")
m.insert("hubert")
m.insert("andre")
m.insert("philipp")
m.insert("rene")
return m
t = demo()
t.toDot()
while not t.isEmpty():
print(t.pop())
display(t.toDot())
import random as rnd
t = Set()
for k in range(30):
k = rnd.randrange(100)
t.insert(k)
display(t.toDot())
while not t.isEmpty():
print(t.pop(), end=' ')
display(t.toDot())
t = Set()
for k in range(30):
t.insert(k)
display(t.toDot())
while not t.isEmpty():
print(t.pop(), end=' ')
display(t.toDot())
S = Set()
for k in range(2, 101):
S.insert(k)
display(S.toDot())
for i in range(2, 101):
for j in range(2, 101):
S.delete(i * j)
display(S.toDot())
while not S.isEmpty():
print(S.pop(), end=' ')
display(S.toDot())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
#self.activation_function = lambda x : sigmoid(x) # Replace 0 with your sigmoid calculation.
self.activation_function = lambda x: 1/(1+np.exp(-x))
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 1/(1+np.exp(-x)) # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
#def sigmoid(x):
# return 1/(1+np.exp(-x))
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs =np.dot(X,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
#delta=output_error_term*hidden_outputs
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.matmul(hidden_outputs,self.weights_hidden_to_output)
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y-final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = error*(self.weights_hidden_to_output.T)
# TODO: Backpropagated error terms - Replace these values with your calculations.
output_error_term = error
hidden_error_term = hidden_error*hidden_outputs*(1-hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term*X[:,None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term*hidden_outputs[:,None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr*delta_weights_h_o/ n_records# update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr*delta_weights_i_h/n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs)# signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
iterations = 5000
learning_rate = 0.5
hidden_nodes = 30
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Planet OS API Demo for Model Comparison
Step2: Let's choose a location near Oahu, Hawaii, to make use of the regional SWAN model we have available...
Step3: At the moment, there is no method to automatically and reliably associate different variable names in each model to one another. Therefore for each model, we give either the variable name or False if the variable is not available.
Step4: For clarity, we extract the data and create a plot for each variable separately...
Step5: 2m Temperature Plot
Step6: Significant Wave Height Plot
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import dateutil.parser
import datetime
from urllib.request import urlopen, Request
import simplejson as json
def extract_reference_time(API_data_loc):
Find reference time that corresponds to most complete forecast. Should be the earliest value.
reftimes = set()
for i in API_data_loc['entries']:
reftimes.update([i['axes']['reftime']])
reftimes=list(reftimes)
if len(reftimes)>1:
reftime = reftimes[0] if dateutil.parser.parse(reftimes[0])<dateutil.parser.parse(reftimes[1]) else reftimes[1]
else:
reftime = reftimes[0]
return reftime
latitude = 21.205
longitude = -158.35
apikey = open('APIKEY').readlines()[0].strip() #'YOUR-API-KEY-GOES-HERE'
datasets = {'noaa_gfs_global_sflux_0.12d':['Temperature_surface','Temperature_height_above_ground',False],
'bom_access-g_global_40km':['sfc_temp','temp_scrn',False],
'pacioos_swan_oahu':[False,False,'shgt'],
'noaa_ww3_global_0.5d':[False,False,'Significant_height_of_wind_waves_surface'],
'hycom_glby0.08_93.0_global':['water_temp',False,False]}
API_data = {}
for i in datasets:
print (i)
variables = ','.join([n for n in datasets[i] if not n == False])
API_url = "http://api.planetos.com/v1/datasets/{3}/point?lon={0}&lat={1}&count=2000&verbose=false&apikey={2}&var={4}".format(longitude,latitude,apikey,i,variables)
request = Request(API_url)
response = urlopen(request)
API_data[i] = json.loads(response.read())
fig = plt.figure(figsize=(15,10))
plt.title('Surface temperature comparison')
ax = fig.add_subplot(111)
for i in datasets:
time_axes = []
data_axes = []
## API response may give more than one reference time by default, choose only the last full forecast by default
reftime = extract_reference_time(API_data[i])
for d in API_data[i]['entries']:
for j in d['data']:
if j == datasets[i][0]:
## rough method to check if we expect kelvin or celsius
add_number = d['data'][j] if d['data'][j] < 200. else d['data'][j] -273.15
if d['axes']['reftime'] == reftime:
## Filter out deeper ocean levels, for HYCOM
if 'z' in d['axes']:
if d['axes']['z'] < 1.:
data_axes.append(add_number)
time_axes.append(dateutil.parser.parse(d['axes']['time']))
else:
data_axes.append(add_number)
time_axes.append(dateutil.parser.parse(d['axes']['time']))
plt.plot(time_axes,data_axes,label=i)
plt.legend()
fig.autofmt_xdate()
fig = plt.figure(figsize=(15,10))
ax = fig.add_subplot(111)
for i in datasets:
time_axes = []
data_axes = []
reftime = extract_reference_time(API_data[i])
for d in API_data[i]['entries']:
for j in d['data']:
if j == datasets[i][1]:
## rough method to check if we expect kelvin or celsius
add_number = d['data'][j] if d['data'][j] < 200. else d['data'][j] -273.15
if d['axes']['reftime'] == reftime:
data_axes.append(add_number)
time_axes.append(dateutil.parser.parse(d['axes']['time']))
plt.plot(time_axes,data_axes,label=i)
plt.legend()
fig.autofmt_xdate()
fig = plt.figure(figsize=(15,10))
ax = fig.add_subplot(111)
for i in datasets:
#for i in ['noaa_gfs_global_sflux_0.12d']:
time_axes = []
data_axes = []
for d in API_data[i]['entries']:
#for k in API_data[d]:
# print(k)
for j in d['data']:
if j == datasets[i][2]:
data_axes.append(d['data'][j])
time_axes.append(dateutil.parser.parse(d['axes']['time']))
plt.plot(time_axes,data_axes,label=i)
plt.legend()
fig.autofmt_xdate()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Regression Project
Step2: Exercise 2
Step3: Modeling
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
! chmod 600 kaggle.json && (ls ~/.kaggle 2>/dev/null || mkdir ~/.kaggle) && mv kaggle.json ~/.kaggle/ && echo 'Done'
# Add code and text blocks to explore the data and explain your work
# Add code and text blocks to build and validate a model and explain your work
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Creating and modifying a hierarchy object
Step2: The method get_graph returns the graph object corresponding to the provided graph id.
Step3: The method get_typing returns the dictionary object corresponding to the provided hierarchy edge and representing the associated graph homomorphism.
Step4: 2. Rewriting of objects in a hierarchy
Step5: The created rule removes the edge 1->2, adds the new node 3 and two edges 3->1 and 3->2.
Step6: Let us fix the desired instance
Step7: We try to apply the rule to the selected instance as is in the strict rewriting mode.
Step8: We have failed to rewrite G, because we have not specified a typing for the newly added node 3. Let us try again, but this time we will provide such a typing.
Step9: We will now create a rule that applies to T and that clones the node agent into two nodes.
Step10: We try to apply the created rule to the graph T in the strict mode.
Step11: We have failed to rewrite T, because we have not specified a typing for the instances of agent in $p$. Let us try again, but this time we will provide such a typing.
Step12: Let us relabel nodes in T.
Step13: 2.2. Rewriting and propagation
Step14: Some of the graphs in the hierarchy are now typed by multiple graphs, which is reflected in the types of nodes, as in the example below
Step15: NB
Step16: 2.3. Rewriting and propagation
Step17: We have created a rule that clones the node a and reconnects the edges between a and b.
Step18: We rewrite the graph shapes with the fixed instances (so, the node circle is cloned).
Step19: Observe the following plots: the cloning of circle was propagated to all the ancestors of shapes, because we didn't specify how to retype instances of circle for these ancestors using the p_typing parameter. This is an example of the previously mentioned backward propagation.
Step20: Even the rule r1 was affected as a result of propagation: all its circle nodes were cloned.
Step21: Let us now consider a small example of forward propagation. We will create a rule that performs some additions and merges of nodes.
Step22: Application of this rule will merge the nodes bad_circle and good_circle in the graph g2. It will then add a new node and connect it with an edge to the merged node. Let us specify some typings of the new node in the RHS
Step23: 3. Serializing and loading a hierarchy object
|
<ASSISTANT_TASK:>
Python Code:
from regraph import NXGraph, NXHierarchy, Rule
from regraph import plot_graph, plot_instance, plot_rule
%matplotlib inline
# Define graph G
g = NXGraph()
g.add_nodes_from(["protein", "binding", "region", "compound"])
g.add_edges_from([("region", "protein"), ("protein", "binding"), ("region", "binding"), ("compound", "binding")])
# Define graph T
t = NXGraph()
t.add_nodes_from(["action", "agent"])
t.add_edges_from([("agent", "agent"), ("agent", "action")])
# Create a hierarchy
simple_hierarchy = NXHierarchy()
simple_hierarchy.add_graph("G", g, {"name": "Simple protein interaction"})
simple_hierarchy.add_graph("T", t, {"name": "Agent interaction"})
simple_hierarchy.add_typing(
"G", "T",
{"protein": "agent",
"region": "agent",
"compound": "agent",
"binding": "action",
}
)
print(simple_hierarchy)
type(simple_hierarchy.get_graph("T"))
simple_hierarchy.get_typing("G", "T")
t_node_positions = plot_graph(simple_hierarchy.get_graph("T"))
g_node_positions = plot_graph(simple_hierarchy.get_graph("G"))
lhs = NXGraph()
lhs.add_nodes_from([1, 2])
lhs.add_edges_from([(1, 2)])
p = NXGraph()
p.add_nodes_from([1, 2])
p.add_edges_from([])
rhs = NXGraph()
rhs.add_nodes_from([1, 2, 3])
rhs.add_edges_from([(3, 1), (3, 2)])
# By default if `p_lhs` and `p_rhs` are not provided
# to a rule, it tries to construct this homomorphisms
# automatically by matching the names. In this case we
# have defined lhs, p and rhs in such a way that that
# the names of the matching nodes correspond
rule = Rule(p, lhs, rhs)
plot_rule(rule)
instances = simple_hierarchy.find_matching("G", rule.lhs)
print("Instances: ", instances)
for instance in instances:
plot_instance(
simple_hierarchy.get_graph("G"),
rule.lhs,
instance,
parent_pos=g_node_positions) #filename=("instance_example_%d.png" % i))
instance = {
1: "protein",
2: "binding"
}
try:
rhs_instance = simple_hierarchy.rewrite("G", rule, instance, strict=True)
except Exception as e:
print("Error message: ", e)
print("Type: ", type(e))
rhs_typing = {
"T": {3: "agent"}
}
rhs_instance = simple_hierarchy.rewrite(
"G", rule, instance, rhs_typing=rhs_typing, strict=True)
print("Instance of the RHS in G", rhs_instance)
plot_instance(
simple_hierarchy.get_graph("G"),
rule.rhs,
rhs_instance,
parent_pos=g_node_positions)
lhs = NXGraph()
lhs.add_nodes_from(["agent"])
rule = Rule.from_transform(lhs)
_, rhs_clone = rule.inject_clone_node("agent")
plot_rule(rule)
instance = {
"agent": "agent"
}
try:
rhs_instance = simple_hierarchy.rewrite("T", rule, instance, strict=True)
except Exception as e:
print("Error message: ", e)
print("Type: ", type(e))
p_typing = {
"G": {
'protein': 'agent',
'region': 'agent',
'compound': rhs_clone,
3: 'agent'
}
}
rhs_instance = simple_hierarchy.rewrite("T", rule, instance, p_typing=p_typing, strict=True)
print("Instance of the RHS in G", rhs_instance)
plot_instance(
simple_hierarchy.get_graph("T"),
rule.rhs,
rhs_instance,
parent_pos=t_node_positions)
simple_hierarchy.relabel_graph_node('T', rhs_instance['agent'], 'organic_agent')
simple_hierarchy.relabel_graph_node('T', rhs_instance[rhs_clone], 'non_organic_agent')
plot_graph(simple_hierarchy.get_graph('T'))
print(simple_hierarchy.get_typing("G", "T"))
hierarchy = NXHierarchy()
colors = NXGraph()
colors.add_nodes_from([
"green", "red"
])
colors.add_edges_from([
("red", "green"),
("red", "red"),
("green", "green")
])
hierarchy.add_graph("colors", colors)
shapes = NXGraph()
shapes.add_nodes_from(["circle", "square"])
shapes.add_edges_from([
("circle", "square"),
("square", "circle"),
("circle", "circle")
])
hierarchy.add_graph("shapes", shapes)
quality = NXGraph()
quality.add_nodes_from(["good", "bad"])
quality.add_edges_from([
("bad", "bad"),
("bad", "good"),
("good", "good")
])
hierarchy.add_graph("quality", quality)
g1 = NXGraph()
g1.add_nodes_from([
"red_circle",
"red_square",
])
g1.add_edges_from([
("red_circle", "red_square"),
("red_circle", "red_circle"),
("red_square", "red_circle")
])
g1_colors = {
"red_circle": "red",
"red_square": "red",
}
g1_shapes = {
"red_circle": "circle",
"red_square": "square",
}
hierarchy.add_graph("g1", g1)
hierarchy.add_typing("g1", "colors", g1_colors)
hierarchy.add_typing("g1", "shapes", g1_shapes)
g2 = NXGraph()
g2.add_nodes_from([
"good_circle",
"good_square",
"bad_circle",
])
g2.add_edges_from([
("good_circle", "good_square"),
("good_square", "good_circle"),
("bad_circle", "good_circle"),
("bad_circle", "bad_circle"),
])
g2_shapes = {
"good_circle": "circle",
"good_square": "square",
"bad_circle": "circle"
}
g2_quality = {
"good_circle": "good",
"good_square": "good",
"bad_circle": "bad",
}
hierarchy.add_graph("g2", g2)
hierarchy.add_typing("g2", "shapes", g2_shapes)
hierarchy.add_typing("g2", "quality", g2_quality)
g3 = NXGraph()
g3.add_nodes_from([
"good_red_circle",
"bad_red_circle",
"good_red_square",
])
g3.add_edges_from([
("bad_red_circle", "good_red_circle"),
("good_red_square", "good_red_circle"),
("good_red_circle", "good_red_square")
])
g3_g1 = {
"good_red_circle": "red_circle",
"bad_red_circle": "red_circle",
"good_red_square": "red_square"
}
g3_g2 = {
"good_red_circle": "good_circle",
"bad_red_circle": "bad_circle",
"good_red_square": "good_square",
}
hierarchy.add_graph("g3", g3)
hierarchy.add_typing("g3", "g1", g3_g1)
hierarchy.add_typing("g3", "g2", g3_g2)
for graph in hierarchy.graphs():
print("Graph ", graph)
plot_graph(hierarchy.get_graph(graph))
print(hierarchy)
print("Node types in G3:\n")
for node in hierarchy.get_graph("g3").nodes():
print(node, hierarchy.node_type("g3", node))
lhs = NXGraph()
lhs.add_nodes_from([1, 2])
lhs.add_edges_from([(1, 2)])
p = NXGraph()
p.add_nodes_from([1, 11, 2])
p.add_edges_from([(1, 2)])
rhs = NXGraph.copy(p)
rhs.add_nodes_from([3])
p_lhs = {1: 1, 11: 1, 2: 2}
p_rhs = {1: 1, 11: 11, 2: 2}
r1 = Rule(p, lhs, rhs, p_lhs, p_rhs)
hierarchy.add_rule("r1", r1, {"desc": "Rule 1: typed by two graphs"})
lhs_typing1 = {1: "red_circle", 2: "red_square"}
rhs_typing1 = {3: "red_circle"}
lhs_typing2 = {1: "good_circle", 2: "good_square"}
rhs_typing2 = {3: "bad_circle"}
hierarchy.add_rule_typing("r1", "g1", lhs_typing1, rhs_typing1)
hierarchy.add_rule_typing("r1", "g2", lhs_typing2, rhs_typing2)
plot_rule(hierarchy.get_rule('r1'))
g1_lhs_typing, g1_rhs_typing = hierarchy.get_typing('r1', 'g1')
g2_lhs_typing, g2_rhs_typing = hierarchy.get_typing('r1', 'g2')
print("Typing of R1 by G1: ")
print("\tLHS", g1_lhs_typing)
print("\tP (is implicit)")
print("\tRHS", g1_rhs_typing)
print("Typing of R1 by G2: ")
print("\tLHS", g2_lhs_typing)
print("\tP (is implicit)")
print("\tRHS", g2_rhs_typing)
lhs = NXGraph()
lhs.add_nodes_from(["a", "b"])
lhs.add_edges_from([
("a", "b"),
("b", "a")
])
p = NXGraph()
p.add_nodes_from(["a", "a1", "b"])
p.add_edges_from([
("a", "b"),
("a1", "b")
])
rhs = NXGraph.copy(p)
rule = Rule(
p, lhs, rhs,
{"a": "a", "a1": "a", "b": "b"},
{"a": "a", "a1": "a1", "b": "b"},
)
plot_rule(rule)
instances = hierarchy.find_matching("shapes", lhs)
print("Instances:")
for instance in instances:
print(instance)
plot_instance(hierarchy.get_graph("shapes"), rule.lhs, instance)
rhs_instances = hierarchy.rewrite("shapes", rule, {"a": "circle", "b": "square"})
for graph in hierarchy.graphs():
print("Graph ", graph)
plot_graph(hierarchy.get_graph(graph))
plot_rule(hierarchy.get_rule('r1'))
pattern = NXGraph()
pattern.add_nodes_from(["a", "b"])
rule = Rule.from_transform(pattern)
rhs_node = rule.inject_merge_nodes(["a", "b"])
rule.inject_add_node("c")
rule.inject_add_edge("c", rhs_node)
instance = {
"a": "good_circle",
"b": "bad_circle",
}
old_position = plot_instance(hierarchy.get_graph("g2"), rule.lhs, instance)
plot_rule(rule)
rhs_typing = {
"shapes": {
"c": "circle"
}
}
rhs_instance = hierarchy.rewrite("g2", rule, instance, rhs_typing=rhs_typing)
for graph in hierarchy.graphs():
print("Graph ", graph)
plot_graph(hierarchy.get_graph(graph))
hierarchy_json = hierarchy.to_json()
import json
print(json.dumps(hierarchy_json, indent=" "))
new_hierarchy = NXHierarchy.from_json(hierarchy_json)
new_hierarchy == hierarchy
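# A minimal persistence sketch (not part of the original tutorial; the file name is arbitrary):
# the JSON serialization round-trips through disk with the stdlib json module.
json.dump(hierarchy_json, open("hierarchy_backup.json", "w"), indent=2)
restored_hierarchy = NXHierarchy.from_json(json.load(open("hierarchy_backup.json")))
print(restored_hierarchy == hierarchy)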
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Try to solve (takes around a minute)
Step2: Remove constraints on terms of the form $\hat{a}^2\hat{b}^2\dots$ or $\hat{a}\hat{b}\dots$ or those that do not contain any $\hat{a}$s or $\hat{b}$s.
Step3: Try to solve again
Step4: See which of the removed constraints fail
Step5: For the second solution
|
<ASSISTANT_TASK:>
Python Code:
import hamnonlineng as hnle
letters = 'abcde'
resonant = [hnle.Monomial(1, 'aabbEEC'), hnle.Monomial(1,'abddEEC')]
op_sum = hnle.operator_sum(letters)
sine_exp = (
hnle.sin_terms(op_sum, 3)
+hnle.sin_terms(op_sum, 5)
+hnle.sin_terms(op_sum, 7)
)
off_resonant = hnle.drop_single_mode(
hnle.drop_definitely_offresonant(
hnle.drop_matching(sine_exp.m, resonant)))
off_resonant = list(off_resonant)
print('Number of off-resonant constraints: %d'%len(off_resonant))
res = hnle.head_and_count(
hnle.solve_constraints_gecode(resonant, off_resonant, letters, maxfreq=50))
# Drop all of the form aabb...
starts_with_aabb = [_ for _ in off_resonant if _.s[4:5].lower() not in 'ab' and _.s.startswith('AABB')]
# Drop all of the form ab...
starts_with_ab = [_ for _ in off_resonant if _.s[2:3].lower() not in 'ab' and _.s.startswith('AB')]
# Drop all that do not contain any a or b
no_a_or_b = [_ for _ in off_resonant if 'a' not in _.s.lower() or 'b' not in _.s.lower()]
to_be_removed = starts_with_aabb + starts_with_ab + no_a_or_b
print('Number of constraints starting with ab or aabb or containing no a or b: %d'%len(to_be_removed))
off_resonant = hnle.drop_matching(off_resonant, to_be_removed)
off_resonant = list(off_resonant)
print('Number of new off-resonant constraints: %d'%len(off_resonant))
res = hnle.head_and_count(
hnle.solve_constraints_gecode(resonant, off_resonant, letters, maxfreq=33))
res[0]
hnle.filter_resonant(res[0], to_be_removed, letters)
res[1]
hnle.filter_resonant(res[1], to_be_removed, letters)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
np.random.seed(1)
df = pd.DataFrame({
'A' : ['one', 'one', 'two', 'three'] * 6,
'B' : ['A', 'B', 'C'] * 8,
'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 4,
'D' : np.random.randn(24),
'E' : np.random.randn(24)
})
def g(df):
return pd.pivot_table(df, values=['D','E'], index=['B'], aggfunc={'D':np.sum, 'E':np.mean})
result = g(df.copy())
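# Cross-check sketch (an assumption, not required by the task): the same per-'B'
# aggregation can be written with groupby/agg; pivot_table is used above because it
# extends naturally to additional index/column keys.
check = df.groupby('B').agg({'D': np.sum, 'E': np.mean})
print(result.equals(check[['D', 'E']]))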
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Since we are running on separate test data, we don't need to do a train_test_split here. But we will scale the data. Need to remember to scale the test data later!
Step2: Applying to Quasars Candidates
Step3: If you want to compare ZSPEC to ZPHOT, use the cells below for test set
Step4: Scale the test data
Step5: Not currently executing the next 2 cells, but putting the code here in case we want to do it later.
Step6: Instantiate Photo-z Algorithm of Choice
Step7: Apply Photo-z Algorithm(s)
Step8: Nadaraya-Watson
Step9: Only need this if Xtest is too big
|
<ASSISTANT_TASK:>
Python Code:
## Read in the Training Data and Instantiating the Photo-z Algorithm
%matplotlib inline
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
#data = Table.read('GTR-ADM-QSO-ir-testhighz_findbw_lup_2016_starclean.fits')
#JT PATH ON TRITON to training set after classification
#data = Table.read('/Users/johntimlin/Catalogs/QSO_candidates/Training_set/GTR-ADM-QSO-ir-testhighz_findbw_lup_2016_starclean_with_shenlabel.fits')
data = Table.read('/Users/johntimlin/Catalogs/QSO_candidates/Training_set/GTR-ADM-QSO-Trainingset-with-McGreer-VVDS-DR12Q_splitlabel_VCVcut_best.fits')
#JT PATH HOME USE SHEN ZCUT
#data = Table.read('/home/john/Catalogs/QSO_Candidates/Training_set/GTR-ADM-QSO-ir-testhighz_findbw_lup_2016_starclean_with_shenlabel.fits')
#data = data.filled()
# Remove stars
qmask = (data['zspec']>0)
qdata = data[qmask]
print len(qdata)
# X is in the format needed for all of the sklearn tools; it just has the colors
#Xtrain = np.vstack([ qdata['ug'], qdata['gr'], qdata['ri'], qdata['iz'], qdata['zs1'], qdata['s1s2']]).T
Xtrain = np.vstack([np.asarray(qdata[name]) for name in ['ug', 'gr', 'ri', 'iz', 'zs1', 's1s2']]).T
#y = np.array(data['labels'])
ytrain = np.array(qdata['zspec'])
# For algorithms that need scaled data:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(Xtrain) # Don't cheat - fit only on training data
#testdata = Table.read('GTR-ADM-QSO-ir_good_test_2016_out_Stripe82all.fits')
# TEST DATA USING 3.5<z<5 zrange ON TRITON
#testdata = Table.read('/Users/johntimlin/Catalogs/QSO_candidates/Final_S82_candidates_full/GTR-ADM-QSO-ir_good_test_2016_out_Stripe82all.fits')
# TEST DATA USING 2.9<z<5.4 zrange ON HOME
#testdata = Table.read('/Users/johntimlin/Catalogs/QSO_Candidates/photoz/SpIES_SHELA_Quasar_Canidates_Shen_zrange_JTmultiproc.fits')
#testdata = Table.read('./catalogs/HZ_forphotoz.fits')
testdata = Table.read('/Users/johntimlin/Catalogs/QSO_candidates/New_training_candidates/Test_point_source_classifier/Final_sets/HZLZ_combined_all_wphotoz_alldata_allclassifiers.fits')
#Limit to objects that have been classified as quasars
#qsocandmask = ((testdata['ypredRFC']==0) | (testdata['ypredSVM']==0) | (testdata['ypredBAG']==0))
testdatacand = testdata#[qsocandmask]
print len(testdata),len(testdatacand)
## Test zspec objects with zspec >=2.9 and see how well the zphot matches with zspec
#testdata = Table.read('/Users/johntimlin/Catalogs/QSO_candidates/Final_S82_candidates_full/QSOs_S82_wzspec_wcolors.fits')
#Limit to objects that have been classified as quasars
#qsocandmask = ((testdata['ypredRFC']==0) | (testdata['ypredSVM']==0) | (testdata['ypredBAG']==0))
#qsocandmask = (testdata['ZSPEC'] >= 2.9)
#testdatacand = testdata#[qsocandmask]
#print len(testdata),len(testdatacand)
#Xtest = np.vstack([ testdatacand['ug'], testdatacand['gr'], testdatacand['ri'], testdatacand['iz'], testdatacand['zs1'], testdatacand['s1s2']]).T
Xtest = np.vstack([np.asarray(testdatacand[name]) for name in ['ug', 'gr', 'ri', 'iz', 'zs1', 's1s2']]).T
XStest = scaler.transform(Xtest) # apply same transformation to test data
# Read in KDE candidates
dataKDE = Table.read('GTR-ADM-QSO-ir-testhighz_kdephotoz_lup_2016_quasar_candidates.dat', format='ascii')
print dataKDE.keys()
XKDE = np.vstack([ dataKDE['ug'], dataKDE['gr'], dataKDE['ri'], dataKDE['iz'], dataKDE['zch1'], dataKDE['ch1ch2'] ]).T
print len(XKDE)
# Read in RF candidates
dataRF = Table.read('GTR-ADM-QSO-ir_good_test_2016_out.fits')
print dataRF.keys()
print len(dataRF)
# Candidates only
maskRF = (dataRF['ypred']==0)
dataRF = dataRF[maskRF]
print len(dataRF)
# X is in the format needed for all of the sklearn tools; it just has the colors
XRF = np.vstack([ dataRF['ug'], dataRF['gr'], dataRF['ri'], dataRF['iz'], dataRF['zs1'], dataRF['s1s2']]).T
import numpy as np
from astroML.linear_model import NadarayaWatson
model = NadarayaWatson('gaussian', 0.05)
model.fit(Xtrain,ytrain)
from sklearn.ensemble import RandomForestRegressor
modelRF = RandomForestRegressor()
modelRF.fit(Xtrain,ytrain)
zphotRF = modelRF.predict(Xtest)
zphotNW = model.predict(Xtest)
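# Quick sanity check (illustrative, not in the original workflow): compare the two
# photo-z estimates against each other before writing them out.
plt.figure(figsize=(5, 5))
plt.scatter(zphotRF, zphotNW, s=2, alpha=0.3)
plt.xlabel('zphot (Random Forest)')
plt.ylabel('zphot (Nadaraya-Watson)')
plt.show()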
from dask import compute, delayed
def process(Xin):
return model.predict(Xin)
# Create dask objects
dobjs = [delayed(process)(x.reshape(1,-1)) for x in Xtest]
import dask.threaded
ypred = compute(*dobjs, get=dask.threaded.get)
# The dask output needs to be reformatted.
zphotNW = np.array(ypred).reshape(1,-1)[0]
testdatacand['zphotNW'] = zphotNW
testdatacand['zphotRF'] = zphotRF
#TRITON PATH
#testdatacand.write('/Users/johntimlin/Catalogs/QSO_candidates/photoz/Candidates_photoz_S82_shenzrange.fits', format='fits')
#HOME PATH
#testdatacand.write('/home/john/Catalogs/QSO_Candidates/photoz/Candidates_photoz_S82_shenzrange.fits', format='fits')
testdatacand.write('./HZLZ_combined_all_hzclassifiers_wphotoz_new.fits')
from densityplot import *
from pylab import *
fig = plt.figure(figsize=(5,5))
hex_scatter(testdatacand['zphotNW'],testdatacand['ug'], min_cnt=10, levels=2, std=True, smoothing=1,
hkwargs={'gridsize': 100, 'cmap': plt.cm.Blues},
skwargs={'color': 'k'})
plt.xlabel('zphot')
plt.ylabel('u-g')
#plt.xlim([-0.1,5.5])
#plt.ylim([-0.1,5.5])
plt.show()
from astroML.plotting import hist as fancyhist
fancyhist(testdatacand['zphotRF'], bins="freedman", histtype="step")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoding Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Sentence to Sequence
Step48: Translate
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
# TODO: Implement Function
EOS = [target_vocab_to_int['<EOS>']]
source_id_text = [[source_vocab_to_int[s] for s in sent.split()] for sent in source_text.split('\n')]
target_id_text = [[target_vocab_to_int[t] for t in sent.split()] + EOS for sent in target_text.split('\n')]
return source_id_text, target_id_text
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
input_ = tf.placeholder(dtype=tf.int32, shape=(None, None), name='input')
targets = tf.placeholder(dtype=tf.int32, shape=(None, None), name='target')
learning_rate = tf.placeholder(dtype=tf.float32, name='lr')
keep_prob = tf.placeholder(dtype=tf.float32, name='keep_prob')
return input_, targets, learning_rate, keep_prob
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return dec_input
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_decoding_input(process_decoding_input)
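# Illustration with hypothetical ids (not from the original data): for a target batch
#   [[12, 7, 3], [9, 4, 3]]
# process_decoding_input drops the last token of each row and prepends the <GO> id,
# e.g. with <GO> = 1 it returns
#   [[1, 12, 7], [1, 9, 4]]
# so during training the decoder sees its own targets shifted right by one step.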
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([cell] * num_layers)
outputs, state = tf.nn.dynamic_rnn(cell, rnn_inputs, dtype=tf.float32)
return state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
dynamic_fn_train = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
outputs, final_state, final_context_state = tf.contrib.seq2seq.dynamic_rnn_decoder(
cell=dec_cell,
decoder_fn=dynamic_fn_train,
inputs=dec_embed_input,
sequence_length=sequence_length,
scope=decoding_scope
)
outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
train_logits = output_fn(outputs)
return train_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn=output_fn,
encoder_state=encoder_state,
embeddings=dec_embeddings,
start_of_sequence_id=start_of_sequence_id,
end_of_sequence_id=end_of_sequence_id,
maximum_length=maximum_length-1,
num_decoder_symbols=vocab_size,
)
outputs, final_state, final_context_state = tf.contrib.seq2seq.dynamic_rnn_decoder(
cell=dec_cell,
decoder_fn=infer_decoder_fn,
scope=decoding_scope
)
return outputs
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
dec_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
dec_cell = tf.contrib.rnn.MultiRNNCell([dec_cell] * num_layers)
with tf.variable_scope("decoding") as decoding_scope:
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
train_logits = decoding_layer_train(
encoder_state,
dec_cell,
dec_embed_input,
sequence_length,
decoding_scope,
output_fn,
keep_prob
)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
infer_logits = decoding_layer_infer(
encoder_state,
dec_cell,
dec_embeddings,
target_vocab_to_int['<GO>'],
target_vocab_to_int['<EOS>'],
sequence_length,
vocab_size,
decoding_scope,
output_fn,
keep_prob
)
return train_logits, infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
# Apply embedding to the input data for the encoder
enc_embeddings = tf.Variable(tf.random_uniform([source_vocab_size, enc_embedding_size]))
embed_input = tf.nn.embedding_lookup(enc_embeddings, input_data)
# Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).
encoder_state = encoding_layer(embed_input, rnn_size, num_layers, keep_prob)
# Process target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function
dec_embed_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)
# Apply embedding to the target data for the decoder.
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_target = tf.nn.embedding_lookup(dec_embeddings, dec_embed_input)
# Decode the encoded input using your decoding_layer
train_logits, infer_logits = decoding_layer(
dec_target,
dec_embeddings,
encoder_state,
target_vocab_size,
sequence_length,
rnn_size,
num_layers,
target_vocab_to_int,
keep_prob
)
return (train_logits, infer_logits)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 10
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 200
decoding_embedding_size = 200
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.8
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
import time
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
return [vocab_to_int['<UNK>'] if w not in vocab_to_int else vocab_to_int[w] for w in sentence.lower().split()]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
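# Usage sketch (the word 'zebra' is assumed to be absent from the small vocabulary,
# so it should map to the <UNK> id):
example_seq = sentence_to_seq('he saw a zebra .', source_vocab_to_int)
print(example_seq)
print([source_int_to_vocab[i] for i in example_seq])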
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The mandel function performs the Mandelbrot set calculation for a given (x,y) position in the complex plane. It returns the number of iterations before the computation "escapes".
Step3: create_fractal iterates over all the pixels in the image, computing the complex coordinates from the pixel coordinates, and calls the mandel function at each pixel. The return value of mandel is used to color the pixel.
Step4: Next we create a 1024x1536 pixel image as a numpy array of bytes. We then call create_fractal with appropriate coordinates to fit the whole Mandelbrot set.
Step5: You can play with the coordinates to zoom in on different regions in the fractal.
Step7: Faster Execution with Numba
Step8: Let's run the @autojit code and see if it is faster.
Step9: On my desktop computer, the time to compute the Mandelbrot set dropped from 6.92s down to 0.06s. That's a speedup of 115x! The reason this is so much faster is that Numba uses Numpy type information to convert the dynamic Python code into statically compiled machine code, which is many times faster to execute than dynamically typed, interpreted Python code.
Step10: In CUDA, a kernel is a function that runs in parallel using many threads on the device. We can write a kernel version of our mandelbrot function by simply assuming that it will be run by a grid of threads. NumbaPro provides the familiar CUDA threadIdx, blockIdx, blockDim and gridDim intrinsics, as well as a grid() convenience function which evaluates to blockDim * blockIdx + threadIdx.
Step11: Device Memory
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import numpy as np
from timeit import default_timer as timer
def mandel(x, y, max_iters):
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
c = complex(x, y)
z = 0.0j
for i in range(max_iters):
z = z*z + c
if (z.real*z.real + z.imag*z.imag) >= 4:
return i
return max_iters
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
height = image.shape[0]
width = image.shape[1]
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
for x in range(width):
real = min_x + x * pixel_size_x
for y in range(height):
imag = min_y + y * pixel_size_y
color = mandel(real, imag, iters)
image[y, x] = color
image = np.zeros((1024, 1536), dtype = np.uint8)
start = timer()
create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
dt = timer() - start
print("Mandelbrot created in %f s" % dt)
imshow(image)
create_fractal(-2.0, -1.7, -0.1, 0.1, image, 20)
imshow(image)
from numba import autojit
@autojit
def mandel(x, y, max_iters):
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
c = complex(x, y)
z = 0.0j
for i in range(max_iters):
z = z*z + c
if (z.real*z.real + z.imag*z.imag) >= 4:
return i
return max_iters
@autojit
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
height = image.shape[0]
width = image.shape[1]
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
for x in range(width):
real = min_x + x * pixel_size_x
for y in range(height):
imag = min_y + y * pixel_size_y
color = mandel(real, imag, iters)
image[y, x] = color
image = np.zeros((1024, 1536), dtype = np.uint8)
start = timer()
create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
dt = timer() - start
print("Mandelbrot created in %f s" % dt)
imshow(image)
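# Note (a sketch of typical Numba behavior, not from the original text): the timing above
# includes JIT compilation on the first call; timing a second call isolates execution time.
start = timer()
create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
dt = timer() - start
print("Mandelbrot created in %f s (second, already-compiled call)" % dt)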
from numbapro import cuda
from numba import *
mandel_gpu = cuda.jit(restype=uint32, argtypes=[f8, f8, uint32], device=True)(mandel)
@cuda.jit(argtypes=[f8, f8, f8, f8, uint8[:,:], uint32])
def mandel_kernel(min_x, max_x, min_y, max_y, image, iters):
height = image.shape[0]
width = image.shape[1]
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
startX, startY = cuda.grid(2)
gridX = cuda.gridDim.x * cuda.blockDim.x;
gridY = cuda.gridDim.y * cuda.blockDim.y;
for x in range(startX, width, gridX):
real = min_x + x * pixel_size_x
for y in range(startY, height, gridY):
imag = min_y + y * pixel_size_y
image[y, x] = mandel_gpu(real, imag, iters)
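# Aside (illustrative): cuda.grid(2) above is shorthand for computing the absolute
# thread indices by hand, roughly
#   startX = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
#   startY = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
# each thread then strides over the image in steps of the total grid width/height.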
gimage = np.zeros((1024, 1536), dtype = np.uint8)
blockdim = (32, 8)
griddim = (32,16)
start = timer()
d_image = cuda.to_device(gimage)
mandel_kernel[griddim, blockdim](-2.0, 1.0, -1.0, 1.0, d_image, 20)
d_image.to_host()
dt = timer() - start
print("Mandelbrot created on GPU in %f s" % dt)
imshow(gimage)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Post-training integer quantization with int16 activations
Step2: Check that the 16x8 quantization mode is available
Step3: Train and export the model
Step4: In this example, you train the model for only a single epoch, so it only trains to ~96% accuracy.
Step5: Write it out to a .tflite file:
Step6: To instead quantize the model to the 16x8 quantization mode, first set the optimizations flag to use the default optimizations. Then specify the 16x8 quantization mode as a required supported operation in the target spec:
Step7: As with int8 post-training quantization, a fully integer quantized model can be produced by setting the converter options inference_input(output)_type to tf.int16.
Step8: Finally, convert the model as usual. Note that, for convenience, the converted model still uses float inputs and outputs by default.
Step9: Note that the resulting file is roughly 1/3 the size of the original.
Step10: Run the TensorFlow Lite models
Step11: Test the models on a single image
Step12: Evaluate the models
Step13: Repeat the evaluation on the 16x8 quantized model:
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pathlib
tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_data=(test_images, test_labels)
)
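# Optional baseline (a sketch, not part of the original flow): record the float model's
# test accuracy so the quantized models can be compared against it later.
_, float_accuracy = model.evaluate(test_images, test_labels, verbose=0)
print('Float model test accuracy:', float_accuracy)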
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]
mnist_train, _ = tf.keras.datasets.mnist.load_data()
images = tf.cast(mnist_train[0], tf.float32) / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)
def representative_data_gen():
for input_value in mnist_ds.take(100):
# Model has only one input so each data point has one element.
yield [input_value]
converter.representative_dataset = representative_data_gen
tflite_16x8_model = converter.convert()
tflite_model_16x8_file = tflite_models_dir/"mnist_model_quant_16x8.tflite"
tflite_model_16x8_file.write_bytes(tflite_16x8_model)
!ls -lh {tflite_models_dir}
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_16x8 = tf.lite.Interpreter(model_path=str(tflite_model_16x8_file))
interpreter_16x8.allocate_tensors()
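# Quick inspection (a sketch): both interpreters keep float32 inputs/outputs here because
# inference_input_type/inference_output_type were left at their defaults during conversion.
print(interpreter.get_input_details()[0]['dtype'],
      interpreter_16x8.get_input_details()[0]['dtype'])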
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
import matplotlib.pylab as plt
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter_16x8.get_input_details()[0]["index"]
output_index = interpreter_16x8.get_output_details()[0]["index"]
interpreter_16x8.set_tensor(input_index, test_image)
interpreter_16x8.invoke()
predictions = interpreter_16x8.get_tensor(output_index)
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
# A helper function to evaluate the TF Lite model using "test" dataset.
def evaluate_model(interpreter):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for test_image in test_images:
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
# Compare prediction results with ground truth labels to calculate accuracy.
accurate_count = 0
for index in range(len(prediction_digits)):
if prediction_digits[index] == test_labels[index]:
accurate_count += 1
accuracy = accurate_count * 1.0 / len(prediction_digits)
return accuracy
print(evaluate_model(interpreter))
# NOTE: This quantization mode is an experimental post-training mode,
# it does not have any optimized kernels implementations or
# specialized machine learning hardware accelerators. Therefore,
# it could be slower than the float interpreter.
print(evaluate_model(interpreter_16x8))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'dwd', 'sandbox-2', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
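# --------------------------------------------------------------------
# Illustrative only (not part of the generated notebook): a sketch of how
# one of the TODO blocks above is typically completed. The values below are
# hypothetical placeholders, not properties of any real model.
# DOC.set_id('cmip6.land.snow.density')
# DOC.set_value("prognostic")
# DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# DOC.set_value(3)
# --------------------------------------------------------------------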
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Data
Step2: To merge these two lists together we will use the zip function.
Step3: We are basically done creating the data set. We now will use the pandas library to export this data set into a csv file.
Step4: Export the dataframe to a csv file. We can name the file births1880.csv. The function to_csv will be used to export the file. The file will be saved in the same location of the notebook unless specified otherwise.
Step5: The only parameters we will use are index and header. Setting these parameters to False prevents the index and the header names from being exported. Change the values of these parameters to get a better understanding of their use.
Step6: Get Data
Step7: Even though this functions has many parameters, we will simply pass it the location of the text file.
Step8: Notice the r before the string. Since the backslashes are special characters, prefixing the string with an r makes it a raw string, so the backslashes are not interpreted as escape sequences.
Step9: This brings us to our first problem of the exercise. The read_csv function treated the first record in the csv file as the header names. This is obviously not correct since the text file did not provide us with header names.
Step10: If we wanted to give the columns specific names, we would have to pass another paramter called names. We can also omit the header parameter.
Step11: You can think of the numbers [0,1,2,3,4] as the row numbers in an Excel file. In pandas these are part of the index of the dataframe. You can think of the index as the primary key of a sql table with the exception that an index is allowed to have duplicates.
Step12: Prepare Data
Step13: As you can see the Births column is of type int64, thus no floats (decimal numbers) or alpha numeric characters will be present in this column.
Step14: Present Data
|
<ASSISTANT_TASK:>
Python Code:
# Import all libraries needed for the tutorial
# General syntax to import specific functions in a library:
##from (library) import (specific library function)
from pandas import DataFrame, read_csv
# General syntax to import a library but no functions:
##import (library) as (give the library a nickname/alias)
import matplotlib.pyplot as plt
import pandas as pd #this is how I usually import pandas
import sys #only needed to determine Python version number
# Enable inline plotting
%matplotlib inline
print('Python version ' + sys.version)
print( 'Pandas version ' + pd.__version__)
# The inital set of baby names and bith rates
names = ['Bob','Jessica','Mary','John','Mel']
births = [968, 155, 77, 578, 973]
zip?
BabyDataSet = list(zip(names, births))  # materialize the zip iterator so it can be inspected and reused
BabyDataSet
df = pd.DataFrame(data=BabyDataSet, columns=['Names', 'Births'])
df
df.to_csv?
df.to_csv('births1880.csv',index=False,header=False)
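# Illustration (not part of the original lesson): with index=True and header=True
# the row index and the column names are written to the file as well, which is the
# behaviour Step 5 above is contrasting. The file name here is just an example.
df.to_csv('births1880_with_header.csv', index=True, header=True)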
read_csv?
Location = r'C:\Users\david\notebooks\pandas\births1880.csv'
df = pd.read_csv(Location)
df
df = pd.read_csv(Location, header=None)
df
df = pd.read_csv(Location, names=['Names','Births'])
df
import os
os.remove(Location)
# Check data type of the columns
df.dtypes
# Check data type of Births column
df.Births.dtype
# Method 1:
Sorted = df.sort_values(['Births'], ascending=False)
Sorted.head(1)
# Method 2:
df['Births'].max()
# Create graph
df['Births'].plot()
# Maximum value in the data set
MaxValue = df['Births'].max()
# Name associated with the maximum value
MaxName = df['Names'][df['Births'] == df['Births'].max()].values
# Text to display on graph
Text = str(MaxValue) + " - " + MaxName
# Add text to graph
plt.annotate(Text, xy=(1, MaxValue), xytext=(8, 0),
xycoords=('axes fraction', 'data'), textcoords='offset points')
print("The most popular name")
df[df['Births'] == df['Births'].max()]
#Sorted.head(1) can also be used
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step7: pyConTextGraph contains the bulk of the pyConTextNLP functionality, including basic class definitions such as the ConTextMarkup class that represents the markup of a sentence.
Step9: Read the itemData definitions
Step11: Example function to analyze each sentence
Step12: We're going to start with our simplest of sentences
Step13: marking up a sentence
Step14: Clean the text
Step15: Identify concepts in the sentence
Step16: What does our initial markup look like?
Step17: What does our markup look like now?
Step18: Are there any relationships in our markup?
Step19: Apply modifiers
|
<ASSISTANT_TASK:>
Python Code:
import pyConTextNLP.pyConTextGraph as pyConText
import pyConTextNLP.itemData as itemData
import networkx as nx
reports = [
    """IMPRESSION: Evaluation limited by lack of IV contrast; however, no evidence of
    bowel obstruction or mass identified within the abdomen or pelvis. Non-specific interstitial opacities and bronchiectasis seen at the right
    base, suggestive of post-inflammatory changes.""",
    """IMPRESSION: Evidence of early pulmonary vascular congestion and interstitial edema. Probable scarring at the medial aspect of the right lung base, with no
    definite consolidation.
    """,
    """IMPRESSION:
    1. 2.0 cm cyst of the right renal lower pole. Otherwise, normal appearance
    of the right kidney with patent vasculature and no sonographic evidence of
    renal artery stenosis.
    2. Surgically absent left kidney.""",
    """IMPRESSION: No pneumothorax.""",
    """IMPRESSION: No definite pneumothorax""",
    """IMPRESSION: New opacity at the left lower lobe consistent with pneumonia."""
]
modifiers = itemData.instantiateFromCSVtoitemData(
"https://raw.githubusercontent.com/chapmanbe/pyConTextNLP/master/KB/lexical_kb_05042016.tsv")
targets = itemData.instantiateFromCSVtoitemData(
"https://raw.githubusercontent.com/chapmanbe/pyConTextNLP/master/KB/utah_crit.tsv")
def markup_sentence(s, modifiers, targets, prune_inactive=True):
markup = pyConText.ConTextMarkup()
markup.setRawText(s)
markup.cleanText()
markup.markItems(modifiers, mode="modifier")
markup.markItems(targets, mode="target")
markup.pruneMarks()
markup.dropMarks('Exclusion')
# apply modifiers to any targets within the modifiers scope
markup.applyModifiers()
markup.pruneSelfModifyingRelationships()
if prune_inactive:
markup.dropInactiveModifiers()
return markup
reports[3]
markup = pyConText.ConTextMarkup()
isinstance(markup,nx.DiGraph)
#### Set the text to be processed
markup.setRawText(reports[3].lower())
print(markup)
print(len(markup.getRawText()))
markup.cleanText()
print(markup)
print(len(markup.getText()))
markup.markItems(modifiers, mode="modifier")
print(markup.nodes(data=True))
print(type(markup.nodes()[0]))
markup.markItems(targets, mode="target")
print(markup.nodes(data=True))
markup.pruneMarks()
print(markup.nodes())
print(markup.edges())
markup.applyModifiers()
print(markup.edges())
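# As a quick end-to-end check (not part of the original walkthrough), the
# markup_sentence() helper defined above can be applied to another report from
# the same list, for example the pneumonia report:
full_markup = markup_sentence(reports[5].lower(), modifiers, targets)
print(full_markup.nodes(data=True))
print(full_markup.edges())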
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How many attacks will happen between the beginning of 2001 and the end of 2099
Step2: What year will see the most vandalism?
Step3: The least?
Step4: What will be the longest gap between attacks?
|
<ASSISTANT_TASK:>
Python Code:
import datetime
from collections import Counter
start = datetime.date(2001, 1, 1)
end = datetime.date(2100, 1, 1) - datetime.timedelta(days=1)
d = start
anarchy_dates = []
delta = datetime.timedelta(days=1)
while d <= end:
if d.day * d.month == d.year % 100:
anarchy_dates.append(d)
d += delta
anarchy_dates.sort()
len(anarchy_dates)
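# Sanity check of the attack condition (dates chosen purely for illustration):
# 2025-05-05 -> day*month = 5*5 = 25 and year % 100 = 25, so it is an attack day;
# 2025-05-06 -> 5*6 = 30 != 25, so it is not.
assert datetime.date(2025, 5, 5) in anarchy_dates
assert datetime.date(2025, 5, 6) not in anarchy_dates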
date_counts = Counter(d.year for d in anarchy_dates)
_, max_attacks = date_counts.most_common()[0]
_, min_attacks = date_counts.most_common()[-1]
for year, attacks in date_counts.items():
if attacks == max_attacks:
print(f'{attacks} attacks in year {year}')
for year, attacks in date_counts.items():
if attacks == min_attacks:
print(f'{attacks} attacks in year {year}')
gaps = [this-previous for this,previous in zip(anarchy_dates[1:], anarchy_dates)]
max_gap = max(gaps)
for i, gap in enumerate(gaps):
    if gap == max_gap:
        # gaps[i] spans anarchy_dates[i] and anarchy_dates[i+1]
        print(f'{gap.days} days between {anarchy_dates[i]} and {anarchy_dates[i+1]}')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We also initialize the GPU, and instantiate the Python interfaces to the GPU codes to get the GPU kernels compiled.
Step2: The next step is to compute the correlation tables for both criteria using the sample input we read in earlier. The Quadratic Difference criterion is computed by the QuadraticDifferenceSparse object on the GPU, for the 3B criterion we use the correlations_cpu_3B function CPU implemented in Python.
Step3: Let's start our comparison by looking at the two correlation matrices we've just produced.
Step4: We can clearly see that the Match 3B criterion produces fewer correlations than the quadratic difference criterion. However, it may be more informative to look at the distributions of the number of correlated hits per hit.
Step5: We can clearly see that the number of correlated hits per hit is lower for all hits using the 3B criterion. The distribution is more narrow because there are more hits that have a similar 'degree'.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as pyplot
from km3net.kernels import QuadraticDifferenceSparse, PurgingSparse
import km3net.util as util
window_width = 1500
N,x,y,z,ct = util.get_real_input_data("sample1.txt")
print ("Read", N, "hits from file")
context, cc = util.init_pycuda()
qd_kernel = QuadraticDifferenceSparse(N, cc=cc)
purging = PurgingSparse(N, cc)
#obtain sparse matrix for Quadratic Difference and convert to dense
d_col_idx, d_prefix_sums, d_degrees, total_hits = qd_kernel.compute(x, y, z, ct)
matrix_qd = util.sparse_to_dense(d_prefix_sums, d_col_idx, N, total_hits)
#obtain correlation matrix for Match 3B criterion
correlations_3b = np.zeros((window_width,N), dtype=np.uint8)
correlations_3b = util.correlations_cpu_3B(correlations_3b, x, y, z, ct)
matrix_3b = util.get_full_matrix(correlations_3b)
f, (ax1, ax2) = pyplot.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
f.tight_layout()
ax1.set_title('quadratic difference')
ax2.set_title('match 3b')
ax1.set_adjustable('box-forced')
ax2.set_adjustable('box-forced')
ax1.imshow(matrix_qd, cmap=pyplot.cm.bone, interpolation='nearest')
ax2.imshow(matrix_3b, cmap=pyplot.cm.bone, interpolation='nearest')
pyplot.hist(np.sum(matrix_qd, axis=0), bins=100, alpha=0.5, label='quadratic difference')
pyplot.hist(np.sum(matrix_3b, axis=0), bins=100, alpha=0.5, label='match 3b')
pyplot.legend(loc='upper right')
pyplot.xlabel('# correlated hits per hit')
pyplot.ylabel('# of hits')
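# Optional numeric summary (not in the original notebook): mean and maximum number
# of correlated hits per hit for both criteria, computed directly from the dense
# matrices used for the histograms above.
qd_degrees = np.sum(matrix_qd, axis=0)
b3_degrees = np.sum(matrix_3b, axis=0)
print("Quadratic Difference: mean %.1f, max %d" % (qd_degrees.mean(), qd_degrees.max()))
print("Match 3B:             mean %.1f, max %d" % (b3_degrees.mean(), b3_degrees.max()))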
clique = purging.compute(*util.dense_to_sparse(matrix_qd))
print("Quadratic Difference resulted in clique of size", len(clique))
print(clique)
clique = purging.compute(*util.dense_to_sparse(matrix_3b))
print("Match 3B resulted in clique of size", len(clique))
print(clique)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tasks
Step2: 1. Display all occupations
Step3: 2. Chose an occupation and select all users with this occupation. Only show user information and hide the users' movie ratings.
Step4: 3. Count the number of men in the database.
Step5: 4. Select all users whose zipcode starts with a 5. Only show the user ID and zipcode.
Step6: 5. Select all movies from the year 1998 and category comedy.
Step7: 6. Count the number of movies from the year 1990 and 1995.
Step8: 7. Display all movies published before the year 1992.
Step9: 8. Imagine that you registered for MovieLens. Create a new user with your user data. Do not include any ratings.
Step10: 9. Update the user record you created in the previous query and insert a new rating for a movie of your choice. You will need to use $addToSet (https
Step11: 10. A query of your choice.
|
<ASSISTANT_TASK:>
Python Code:
import pymongo
from pymongo import MongoClient
import datetime
import re
from pymongo import InsertOne, DeleteOne, ReplaceOne
import datetime
client = MongoClient()
client = MongoClient('mongodb://localhost:27017/')
db = client.homework2
users = db.users
movies = db.movies
movieList = movies.find({"_id": 1})
for movie in movieList:
print movie
occupations = sorted(users.distinct(u'occupation'))
for occupation in occupations:
print occupation
# We will choose 'technician/engineer' and hide movie reviews - limit to first 10 results
engineers = users.find({u'occupation': 'technician/engineer'},
{u'movies':0}).limit(10)
for engineer in engineers:
print engineer
men = users.find({u'gender': 'M'}).count()
print "There are " + str(men) + " men in the database."
regex = re.compile("^5.*")
by_zipcodes = users.find({u'zipcode': regex},
{u'_id':1, u'zipcode': 1}).limit(10)
for zipcode in by_zipcodes:
print zipcode
comedies_from_1998 = movies.find({u'year': 1998, u'category': u'Comedy'}).limit(10)
for comedy in comedies_from_1998:
print comedy
movies_between_1990_and_1995 = movies.find({u'year': {'$gte': 1990, '$lte': 1995}}).count()
print 'There are ' + str(movies_between_1990_and_1995) + ' movies between 1990 and 1995'
movies_before_1992 = movies.find({u'year': {'$lt': 1992}})\
.limit(10)\
.sort('year', pymongo.ASCENDING)
for movie in movies_before_1992:
print movie
# users.delete_one({u'zipcode': u'68102'})
me = users.find_one({u'zipcode': u'68102'})
if me is not None:
print me
else:
max_id = users.find_one({u'zipcode': u'68102'})
myself = {u'gender': u'M',
u'age': u'25-34',
u'zipcode': u'68102',
u'occupation': u'technician/engineer'}
result = db.users.insert_one(myself)
my_id = result.inserted_id
print 'created my profile with id ' + str(my_id)
movie_review = {u'moveID': 3272, # Bad Lieutenant
u'rating': 5,
u'timestamp': datetime.datetime.utcnow()}
users.update_one(
{ u'zipcode': u'68102'},
{ "$addToSet":{"movies": movie_review} },
upsert=True)
me = users.find_one({u'zipcode': u'68102'})
print me
regex = re.compile("^681.*")
by_zipcodes = users.find({u'zipcode': regex},
{u'_id':1, u'zipcode': 1}).limit(10)
for zipcode in by_zipcodes:
print zipcode
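# One more illustrative query (not part of the original homework): count users per
# occupation with the aggregation pipeline, sorted by frequency.
pipeline = [
    {'$group': {'_id': '$occupation', 'count': {'$sum': 1}}},
    {'$sort': {'count': -1}}
]
for doc in users.aggregate(pipeline):
    print doc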
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I think the point is clear by now... It is tedious to write the same thing 20 times. Now imagine you had to do this not 20 times but 10,000 times!!! Sounds like a lot of work, right? Fortunately, Python has several strategies to solve this. Today we will look at the for loop (or "froot loop", as I like to call it).
Step2: Yay!!! You saw what can be done with loops. Now it's your turn.
Step3: Exercise 2
Step4: Watch what happens when we ask Python to print each element of the nested list
Step5: And what happens if we want to get every element of all the lists
|
<ASSISTANT_TASK:>
Python Code:
# Get the square of 1
# Get the square of 2
# Get the square of 3
# Get the square of 4
# Get the square of 5
# Get the square of 6
# Get the square of 7
# Get the square of 8
# Get the square of 9
# Get the square of 10
for numero in range(1,21):
cuadrado = numero**2
print(cuadrado)
lista = [5.9, 3.0, 2, 25.5, 14.2]
lista_anidada = [['Perro', 'Gato'], ['Joven', 'Viejo'], [1, 2]]
for elemento in lista_anidada:
print (elemento)
for elemento in lista_anidada:
for objeto in elemento:
print(objeto)
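# Just for comparison (not part of the original notebook): the same flattening can
# be written in one line with a list comprehension.
flattened = [objeto for elemento in lista_anidada for objeto in elemento]
print(flattened)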
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First Step
Step2: Second Step
Step3: Look at all the different shapes for different kilometers per year
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import pandas as pd
print(pd.__version__)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
import keras
print(keras.__version__)
df = pd.read_csv('./insurance-customers-1500.csv', sep=';')
y=df['group']
df.drop('group', axis='columns', inplace=True)
X = df.values  # .as_matrix() was removed in newer pandas; .values is equivalent
df.describe()
# ignore this, it is just technical code
# should come from a lib, consider it to appear magically
# http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
cmap_print = ListedColormap(['#AA8888', '#004000', '#FFFFDD'])
cmap_bold = ListedColormap(['#AA4444', '#006000', '#AAAA00'])
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#FFFFDD'])
font_size=25
def meshGrid(x_data, y_data):
h = 1 # step size in the mesh
x_min, x_max = x_data.min() - 1, x_data.max() + 1
y_min, y_max = y_data.min() - 1, y_data.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return (xx,yy)
def plotPrediction(clf, x_data, y_data, x_label, y_label, colors, title="", mesh=True, fixed=None, fname=None, print=False):
xx,yy = meshGrid(x_data, y_data)
plt.figure(figsize=(20,10))
if clf and mesh:
grid_X = np.array(np.c_[yy.ravel(), xx.ravel()])
if fixed:
fill_values = np.full((len(grid_X), 1), fixed)
grid_X = np.append(grid_X, fill_values, axis=1)
Z = clf.predict(grid_X)
Z = np.argmax(Z, axis=1)
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
if print:
plt.scatter(x_data, y_data, c=colors, cmap=cmap_print, s=200, marker='o', edgecolors='k')
else:
plt.scatter(x_data, y_data, c=colors, cmap=cmap_bold, s=80, marker='o', edgecolors='k')
plt.xlabel(x_label, fontsize=font_size)
plt.ylabel(y_label, fontsize=font_size)
plt.title(title, fontsize=font_size)
if fname:
plt.savefig(fname)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42, stratify=y)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
# tiny little pieces of feature engeneering
from keras.utils.np_utils import to_categorical
num_categories = 3
y_train_categorical = to_categorical(y_train, num_categories)
y_test_categorical = to_categorical(y_test, num_categories)
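# Quick sanity check (illustrative only): to_categorical one-hot encodes the three
# class labels 0, 1, 2 into rows of length num_categories.
print(to_categorical([0, 1, 2], num_categories))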
from keras.layers import Input
from keras.layers import Dense
from keras.models import Model
from keras.layers import Dropout
inputs = Input(name='input', shape=(3, ))
x = Dense(100, name='hidden1', activation='relu')(inputs)
x = Dense(100, name='hidden2', activation='relu')(x)
predictions = Dense(3, name='softmax', activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
# loss function: http://cs231n.github.io/linear-classify/#softmax
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
%time model.fit(X_train, y_train_categorical, epochs=500, batch_size=100)
train_loss, train_accuracy = model.evaluate(X_train, y_train_categorical, batch_size=100)
train_accuracy
test_loss, test_accuracy = model.evaluate(X_test, y_test_categorical, batch_size=100)
test_accuracy
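# Illustrative single prediction (not in the original notebook). Judging from the
# plotPrediction calls below, the assumed feature order is (max speed, age, km per
# year); the concrete numbers here are made up just to show the API.
sample_customer = np.array([[190.0, 47.0, 15.0]])
print(model.predict(sample_customer))
print(np.argmax(model.predict(sample_customer), axis=1))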
kms_per_year = 20
plotPrediction(model, X_test[:, 1], X_test[:, 0],
'Age', 'Max Speed', y_test,
fixed = kms_per_year,
title="Test Data Max Speed vs Age with Prediction, 20 km/year")
kms_per_year = 50
plotPrediction(model, X_test[:, 1], X_test[:, 0],
'Age', 'Max Speed', y_test,
fixed = kms_per_year,
title="Test Data Max Speed vs Age with Prediction, 50 km/year")
kms_per_year = 5
plotPrediction(model, X_test[:, 1], X_test[:, 0],
'Age', 'Max Speed', y_test,
fixed = kms_per_year,
title="Test Data Max Speed vs Age with Prediction, 5 km/year")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Queries
Step6: Database connection management object
|
<ASSISTANT_TASK:>
Python Code:
import pymysql
import os
import csv
ALL_WIKI_AGGREGATION_QUERY = """
SELECT
timestamp AS month,
SUM(weighted_sum) AS weighted_sum,
SUM(LOG(weighted_sum)) AS weighted_log_sum,
SUM(prediction = "Stub") AS stub_n,
SUM(prediction = "Start") AS start_n,
SUM(prediction = "C") AS c_n,
SUM(prediction = "B") AS b_n,
SUM(prediction = "GA") AS ga_n,
SUM(prediction = "FA") AS fa_n,
COUNT(*) AS n
FROM {datasets_db_name}.monthly_wp10_enwiki
GROUP BY month;
"""
WIKIPROJECT_AGGREGATION_QUERY = """
SELECT
monthly_aq.timestamp AS month,
SUM(weighted_sum) AS weighted_sum,
SUM(LOG(weighted_sum)) AS weighted_log_sum,
SUM(prediction = "Stub") AS stub_n,
SUM(prediction = "Start") AS start_n,
SUM(prediction = "C") AS c_n,
SUM(prediction = "B") AS b_n,
SUM(prediction = "GA") AS ga_n,
SUM(prediction = "FA") AS fa_n,
COUNT(*) AS n
FROM {enwiki_db_name}.page AS talk
INNER JOIN {enwiki_db_name}.page AS article ON
talk.page_title = article.page_title AND
article.page_namespace = 0
INNER JOIN {enwiki_db_name}.templatelinks USE INDEX (tl_namespace) ON
tl_from = talk.page_id
INNER JOIN {datasets_db_name}.monthly_wp10_enwiki AS monthly_aq ON
article.page_id = monthly_aq.page_id
WHERE
talk.page_namespace = 1 AND
tl_namespace = 10 AND
(
tl_title = %(project_template)s OR
tl_title IN (
SELECT page.page_title
FROM {enwiki_db_name}.pagelinks
INNER JOIN {enwiki_db_name}.page ON page_id = pl_from
WHERE
pl_namespace = 10 AND
pl_title = %(project_template)s AND
pl_from_namespace = 10 AND
page_is_redirect
)
)
GROUP BY month;
"""
class DBMonthlyStats:
def __init__(self, config):
self.conn = pymysql.connect(
host=config.get('database', 'replica_host'),
read_default_file=config.get('database', 'read_default_file'))
self.all_wiki_aggregation_query = ALL_WIKI_AGGREGATION_QUERY.format(
datasets_db_name=config.get('database', 'datasets_db_name'))
self.wikiproject_aggregation_query = WIKIPROJECT_AGGREGATION_QUERY.format(
datasets_db_name=config.get('database', 'datasets_db_name'),
enwiki_db_name=config.get('database', 'enwiki_db_name'))
def all_wiki_aggregation(self):
        """Generate a cross-wiki monthly-aggregate dataset.
        Returns a cursor that iterates over tuples (rows of the result set).
        """
with self.conn.cursor() as cursor:
cursor.execute(self.all_wiki_aggregation_query)
return cursor
def wikiproject_aggregation(self, project_template):
        """Generate a wikiproject-specific monthly-aggregate dataset.
        Returns a cursor that iterates over tuples (rows of the result set).
        """
with self.conn.cursor() as cursor:
cursor.execute(self.wikiproject_aggregation_query,
{'project_template': project_template})
return cursor
def dump_aggregation(cursor, file):
headers = [i[0] for i in cursor.description]
    writer = csv.writer(file, delimiter='\t', quoting=csv.QUOTE_NONE)
    writer.writerow(headers)  # csv.writer has no fieldnames/writeheader; write the header row explicitly
for row in cursor:
writer.writerow(row)
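# Illustrative usage (not part of the original module): wiring the pieces together
# with a configparser-style config object, which matches the config.get(...) calls
# in __init__ above. The config path, template name, and output file are examples only.
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
stats = DBMonthlyStats(config)
with open('wikiproject_medicine_monthly.tsv', 'w') as out:
    dump_aggregation(stats.wikiproject_aggregation('WikiProject_Medicine'), out)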
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.2 Answer
Step2: 1.3 Answer
Step3: 1.4 Answer
Step4: 1.7 Answer
Step5: 2. CLT Theory (4 Points)
Step6: 3.1 Answer
Step7: 3.2 Answer
Step8: 3.3 Answer
Step9: 3.4 Answer
Step10: 4. Sample Statistics (17 Points)
Step11: 4.1 Answer
Step12: 4.2 Answer
Step13: 4.3 Answer
|
<ASSISTANT_TASK:>
Python Code:
from scipy import stats as ss
print(ss.expon.cdf(12, scale=36))
print(ss.binom.pmf(2, p=1 / 36, n=12))
print(ss.binom.pmf(2, p=1 / (3 * 365), n=365))
print(ss.poisson.pmf(2, mu=1 / 3))
print(ss.poisson.pmf(1, mu=1 / 3))
ss.binom?
result = ss.expon.ppf(0.99, scale = 24 * 60 / 2)
days = int(result / 24 / 60)
hours = int((result / 60 - days * 24))
minutes = (result - days * 24 * 60 - hours * 60)
print(days, hours, minutes)
ss.norm.cdf(60, scale=15, loc=90)
1 - ss.norm.cdf(93, scale=15, loc=90)
from scipy.special import comb
sum = 0
N = 10
p = 0.3
for i in range(0,N+1):
sum += i * comb(N, i) * p**i * (1 - p)**(N - i)
print(sum, p * N)
data_3_1 = [93.14,94.66, 102.1, 79.98, 96.85, 106.79, 101.92, 91.99, 97.22, 99.1, 88.7, 123.66, 99.7, 115.03, 99.28, 114.59, 102.25, 88.4, 111.06, 75.19, 107.32, 81.21, 100.49, 109.04, 105.09, 96.17, 78.13, 98.37, 104.47, 95.41]
data_3_2 = [2.24,3.86, 2.19, 1.5, 2.34, 2.55, 1.8, 3.99, 2.64, 3.8]
data_3_3 = [53.43,50.49, 52.55, 51.73]
import numpy as np
from scipy import stats as ss
sample_mean = np.mean(data_3_1)
sample_std = np.std(data_3_1, ddof=1)
Zlo = ss.norm.ppf(0.1)
Xlo = Zlo * sample_std / np.sqrt(len(data_3_1)) + sample_mean
Xhi = -Zlo * sample_std / np.sqrt(len(data_3_1)) + sample_mean
print(Xlo, 'to', Xhi)
sample_mean = np.mean(data_3_2)
sample_std = np.std(data_3_2, ddof=1)
Tlo = ss.t.ppf(0.01, len(data_3_2) - 1)
Xlo = Tlo * sample_std /np.sqrt(len(data_3_2)) + sample_mean
print(Xlo)
sample_mean = np.mean(data_3_3)
sample_std = np.std(data_3_3, ddof=1)
Tlo = ss.t.ppf(0.025, len(data_3_3) - 1)
Xlo = Tlo * sample_std / np.sqrt(len(data_3_3)) + sample_mean
Xhi = -Tlo * sample_std / np.sqrt(len(data_3_3)) + sample_mean
print(Xlo, 'to', Xhi)
sample_mean = np.mean(data_3_3)
true_std = 2
Zlo = ss.norm.ppf(0.025)
Xlo = Zlo * true_std / np.sqrt(len(data_3_3)) + sample_mean
Xhi = -Zlo * true_std / np.sqrt(len(data_3_3)) + sample_mean
print(Xlo, 'to', Xhi)
X = [1.6,0.4, -1.05, -0.08, 0.99, -1.89, 0.29, 0.71, -0.47, 1.15]
Y = [3.59,1.49, -2.57, -0.0, 2.0, -3.48, 0.14, 1.38, -1.48, 2.6]
for xi, yi in zip(X, Y):
print(xi, yi)
ans = np.corrcoef(X, Y)  # ddof has no effect on the correlation coefficient and is deprecated
print(ans[0,1])
#compute the mean first
xmean = 0
ymean = 0
for xi, yi in zip(X, Y):
xmean += xi
ymean += yi
xmean /= len(X)
ymean /= len(Y)
#now compute covariance using our previous calculation.
cov = 0
for xi, yi in zip(X, Y):
cov += (xi - xmean) * (yi - ymean)
cov /= len(X) - 1
print(cov)
from math import *
YSort = Y[:]
YSort.sort()
N = len(YSort)
print((YSort[int(N / 2)] + YSort[int(N / 2 - 1)]) / 2)
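# Optional cross-check (not part of the original answers): numpy's built-ins should
# agree with the manual covariance and median computed above.
print(np.cov(X, Y, ddof=1)[0, 1])
print(np.median(Y))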
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Using interact for animation with data
Step3: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays
Step4: Compute a 2d NumPy array called phi
Step6: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful.
Step7: Use interact to animate the plot_soliton_data function versus time.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def soliton(x, t, c, a):
    """Return phi(x, t) for a soliton wave with constants c and a."""
return (0.5)*c*((1/np.cosh(((np.sqrt(c)*0.5)*(x-c*t-a)))**2))
assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5]))
tmin = 0.0
tmax = 10.0
tpoints = 100
t = np.linspace(tmin, tmax, tpoints)
xmin = 0.0
xmax = 10.0
xpoints = 200
x = np.linspace(xmin, xmax, xpoints)
c = 1.0
a = 0.0
phi = np.zeros((xpoints, tpoints))  # worked with Hunter Thomas
for i in range(xpoints):            # loop over integer indices, not the coordinate values
    for j in range(tpoints):
        phi[i, j] = soliton(x[i], t[j], c, a)
assert phi.shape==(xpoints, tpoints)
assert phi.ndim==2
assert phi.dtype==np.dtype(float)
assert phi[0,0]==soliton(x[0],t[0],c,a)
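# Equivalent vectorized construction (optional, not required by the exercise):
# because soliton() only uses numpy operations, the whole array can be built in one
# call on a meshgrid instead of the double loop above.
X_grid, T_grid = np.meshgrid(x, t, indexing='ij')
phi_vectorized = soliton(X_grid, T_grid, c, a)
assert np.allclose(phi, phi_vectorized)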
def plot_soliton_data(i=0):
    """Plot the soliton data at t[i] versus x."""
    plt.plot(x, soliton(x, t[i], c, a))
    plt.xlabel('x')
    plt.ylabel('Phi')
    plt.title('Soliton wave at time t[{}]'.format(i))
plt.tick_params(axis='x', top='off', direction='out')
plt.tick_params(axis='y', right='off', direction='out')
plot_soliton_data(0)
assert True # leave this for grading the plot_soliton_data function
interact(plot_soliton_data, i=(0, tpoints - 1));  # slide over integer time indices so t[i] is a valid lookup
assert True # leave this for grading the interact with plot_soliton_data cell
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting with Geoplot
Step2: Geoplot can re-project data into any of the map projections provided by CartoPy
Step3: If you want to use size as a visual variable, you want a cartogram. Here we scale each African country by its population estimate
Step4: If we have data in the shape of points in space, we may generate a kernel density estimate (KDE) plot of their distribution
|
<ASSISTANT_TASK:>
Python Code:
import geopandas
path = geopandas.datasets.get_path('naturalearth_lowres')
df = geopandas.read_file(path)
# Add a column we'll use later
df['gdp_pp'] = df['gdp_md_est'] / df['pop_est']
boroughs = geopandas.read_file(geopandas.datasets.get_path('nybb')).to_crs(epsg='4326')
injurious_collisions = geopandas.read_file(
"https://github.com/ResidentMario/geoplot-data/raw/master/nyc-injurious-collisions.geojson")
import geoplot
geoplot.polyplot(df, figsize=(8, 4))
geoplot.choropleth(df, hue='gdp_pp', cmap='Greens', figsize=(8, 4))
geoplot.cartogram(df[df['continent'] == 'Africa'],
scale='pop_est', limits=(0.2, 1), figsize=(7, 8))
ax = geoplot.kdeplot(injurious_collisions.sample(1000),
shade=True, shade_lowest=False,
clip=boroughs.geometry)
geoplot.polyplot(boroughs, ax=ax)
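# A related plot type (illustrative addition, not from the original guide): the raw
# collision points can also be drawn directly with pointplot on top of the borough
# outlines instead of a density estimate.
ax = geoplot.polyplot(boroughs)
geoplot.pointplot(injurious_collisions.sample(1000), ax=ax)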
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Runge-Kutta methods
Step3: Four-stage Runge-Kutta methods implementation
Step4: Examples
Step5: As we can see from the figure above, the solutions are nearly identical (we almost cannot distinguish between them).
Step6: As we can see from the figure above, the solutions are nearly identical (we almost cannot distinguish between them).
Step7: Example 4
Step8: Question 2
|
<ASSISTANT_TASK:>
Python Code:
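# The original notebook defines its imports and an `extract` helper in earlier cells
# that are not shown here. A minimal reconstruction (the exact original may differ):
# `extract` simply unzips an iterable of (x, y) tuples into parallel lists.
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def extract(iterable):
    return [list(values) for values in zip(*iterable)]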
def rk2(x_0, y_0, f, step=0.001, k_max=None, method='improved_euler'):
    r"""
Two-stage Runge-Kutta method for solving first-order ODE.
The function computes `k_max` iterations from the initial conditions `x_0` and `y_0` with
steps of size `step`. It yields a total of `k_max` + 1 values. Being h_{k} the step at x_{k},
the recorrent equation is:
y_{k+1} = y_{k} + h_{k} * (1-(1/(2*lambda)) k_{1} + (1/(2*lambda)) k_{2})
where
k_{1} = f(x_{k}, y_{k})
k_{2} = f(x_{k} + lambda * h_{k}, y_{k} + lambda * h_{k} * k_{1})
When `method` is 'improved_euler', `lambda` is set to 1.
When `method` is 'heun', `lambda` is set to 2/3.
Parameters
----------
x_0 : float
The initial value for the independent variable.
y_0 : array_like
1-D array of initial values for the dependente variable evaluated at `x_0`.
f : callable
The function that represents the first derivative of y with respect to x.
It must accept two arguments: the point x at which it will be evaluated and
the value of y at this point.
step : float, optional
The step size between each iteration.
k_max : number
The maximum number of iterations.
method : ["improved_euler", "heun"]
The specific two-stage method to use.
Yields
------
x_k : float
The point at which the function was evaluated in the last iteration.
y_k : float
The value of the function in the last iteration.
Raises
------
TypeError
    If the method argument is invalid or not supported.
    """
if k_max is None: counter = itertools.count()
else: counter = range(k_max)
if method == 'improved_euler':
b1, b2 = 1/2.0, 1/2.0
c2 = 1
a21 = 1
elif method == 'heun':
b1, b2 = 1/4.0, 3/4.0
c2 = 2/3.0
a21 = 2/3.0
else:
raise TypeError("The method {} is not valid or supported.".format(method))
x_k = x_0
y_k = y_0
yield (x_k, y_k)
for k in counter:
k1 = f(x_k, y_k)
k2 = f(x_k + c2 * step, y_k + a21 * step * k1)
y_k = y_k + step * (b1 * k1 + b2 * k2)
x_k = x_k + step
yield (x_k, y_k)
def rk4(x_0, y_0, f, step=0.001, k_max=None, method='classical'):
    r"""
Four-stage Runge-Kutta methods for solving first-order ODE.
The function computes `k_max` iterations from the initial conditions `x_0` and `y_0` with
steps of size `step`. It yields a total of `k_max` + 1 values. We call h_{k} the step at x_{k}.
Classical Runge-Kutta method (RK4):
y_{k+1} = y_{k} + h/6 * (k_{1} + 2*k_{2} + 2*k_{3} + k_{4})
where
k_{1} = f(x_{k}, y_{k})
k_{2} = f(x_{k} + h_{k}/2, y_{k} + h_{k}/2 * k_{1})
k_{3} = f(x_{k} + h_{k}/2, y_{k} + h_{k}/2 * k_{2})
k_{3} = f(x_{k} + h_{k}, y_{k} + h_{k} * k_{3})
Variant of the classical Runge-Kutta method:
y_{k+1} = y_{k} + h/8 * (k_{1} + 3*k_{2} + 3*k_{3} + k_{4})
where
k_{1} = f(x_{k}, y_{k})
k_{2} = f(x_{k} + h_{k}/3, y_{k} + h_{k}/3 * k_{1})
k_{3} = f(x_{k} + 2*h_{k}/3, y_{k} - h_{k}/3 * k_{1} + h_{k} * k_{2})
k_{3} = f(x_{k} + h_{k}, y_{k} + h_{k} * k_{1} - h_{k} * k_{2} + h_{k} * k_{3})
Parameters
----------
x_0 : float
The initial value for the independent variable.
y_0 : array_like
1-D array of initial values for the dependente variable evaluated at `x_0`.
f : callable
The function that represents the first derivative of y with respect to x.
It must accept two arguments: the point x at which it will be evaluated and
the value of y at this point.
step : float, optional
The step size between each iteration.
k_max : number
The maximum number of iterations.
method : ["classical", "variant"]
The specific four-stage method to use.
Yields
------
x_k : float
The point at which the function was evaluated in the last iteration.
y_k : float
The value of the function in the last iteration.
Raises
------
TypeError
    If the method argument is invalid or not supported.
    """
if k_max is None: counter = itertools.count()
else: counter = range(k_max)
if method == 'classical':
b1, b2, b3, b4 = 1/6.0, 1/3.0, 1/3.0, 1/6.0
c2, c3, c4 = 1/2.0, 1/2.0, 1
a21, a31, a32, a41, a42, a43 = 1/2.0, 0, 1/2.0, 0, 0, 1
elif method == 'variant':
b1, b2, b3, b4 = 1/8.0, 3/8.0, 3/8.0, 1/8.0
c2, c3, c4 = 1/3.0, 2/3.0, 1
a21, a31, a32, a41, a42, a43 = 1/3.0, -1/3.0, 1, 1, -1, 1
else:
raise TypeError("The method {} is not valid or supported.".format(method))
x_k = x_0
y_k = y_0
yield (x_k, y_k)
for k in counter:
k1 = f(x_k, y_k)
k2 = f(x_k + c2 * step, y_k + a21 * step * k1)
k3 = f(x_k + c3 * step, y_k + a31 * step * k1 + a32 * step * k2)
k4 = f(x_k + c4 * step, y_k + a41 * step * k1 + a42 * step * k2 + a43 * step * k3)
y_k = y_k + step * (b1 * k1 + b2 * k2 + b3 * k3 + b4 * k4)
x_k = x_k + step
yield (x_k, y_k)
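# Quick accuracy check (not part of the original examples): for y' = y with y(0) = 1
# the exact solution at x = 1 is e, so 100 classical RK4 steps of size 0.01 should
# reproduce np.e to high precision.
for _, y_rk4_check in rk4(x_0=0.0, y_0=1.0, f=lambda x, y: y, step=0.01, k_max=100):
    pass
print(y_rk4_check, np.e, abs(y_rk4_check - np.e))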
def example1(x_k, y_k):
return x_k**2 + y_k**2
results = rk2(x_0=0.0, y_0=0.0, f=example1, step=0.1, k_max=10, method='improved_euler')
x, y_improved_euler = extract(results)
results = rk2(x_0=0.0, y_0=0.0, f=example1, step=0.1, k_max=10, method='heun')
x, y_heun = extract(results)
df1 = pd.DataFrame({"x": x, "y_improved_euler": y_improved_euler, "y_heun": y_heun})
df1
fig, ax = plt.subplots(figsize=(13, 8))
plt.plot(df1['x'], df1['y_improved_euler'], label='Improved Euler approximation with step 0.1', color='blue')
plt.plot(df1['x'], df1['y_heun'], label='Heun approximation with step 0.1', color='red')
plt.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True)
ax.set(title="Two-stage Runge-Kutta methods", xlabel="x", ylabel="y");
def example2(x_k, y_k):
return x_k**2 + y_k**2
results = rk4(x_0=0.0, y_0=0.0, f=example2, step=0.1, k_max=10, method='classical')
x, y_classical_rk4 = extract(results)
results = rk4(x_0=0.0, y_0=0.0, f=example2, step=0.1, k_max=10, method='variant')
x, y_variant_rk4 = extract(results)
df2 = pd.DataFrame({"x": x,
"y_classical_rk4": y_classical_rk4,
"y_variant_rk4": y_variant_rk4})
df2
fig, ax = plt.subplots(figsize=(13, 8))
plt.plot(df2['x'], df2['y_classical_rk4'],
label='Classical Runge-Kutta approximation with step 0.1', color='blue')
plt.plot(df2['x'], df2['y_variant_rk4'],
label='Variant of the classical Runge-Kutta approximation with step 0.1', color='red')
plt.legend(loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True)
ax.set(title="Four-stage Runge-Kutta methods", xlabel="x", ylabel="y");
def example3(x_k, y_k):
return np.tan(y_k) + 1
results = rk2(x_0=1.0, y_0=1.0, f=example3, step=0.025, k_max=4, method='heun')
x, y_heun = extract(results)
df3 = pd.DataFrame({"x": x, "y_heun": y_heun})
df3
def example4(t_k, u_k):
return np.array([u_k[1], -3*np.cos(t_k) - np.exp(u_k[1]) + 1 - u_k[0]])
results = rk4(x_0=0.0, y_0=np.array([0.0, 0.0]), f=example4, step=0.01, k_max=5000, method='classical')
t, ys = extract(results)
y_classical, dy_classical = extract(ys)
df4 = pd.DataFrame({"t": t, "y_classical": y_classical, "dy_classical": dy_classical})
t_interval = (df4.t > 43) & (df4.t < 50)
df4_interval = df4.loc[t_interval, ["t", "y_classical"]]
max_y = df4_interval.loc[:, "y_classical"].max()
min_y = df4_interval.loc[:, "y_classical"].min()
print("The amplitude of oscilattion for t in [43, 50] is {0:.3f}.".format(max_y - min_y))
fig, ax = plt.subplots(figsize=(13, 8))
plt.plot(df4['t'], df4['y_classical'],
label="Classical Runge-Kutta approximation with step 0.01", color='blue')
plt.plot(df4_interval['t'], df4_interval['y_classical'],
label="Interval of interest, $t \in [43, 50]$", color='red')
plt.legend(loc='upper right', fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True)
ax.set(title=r"Solution of y'' + (exp(y') - 1) + y = -3cos(t)", xlabel="t", ylabel="y");
def rk4_modified(x_0, y_0, f, step=0.001, k_max=None):
if k_max is None: counter = itertools.count()
else: counter = range(k_max)
b1, b2, b3, b4, b5 = 1/6.0, 0.0, 0.0, 2/3.0, 1/6.0
c2, c3, c4, c5 = 1/3.0, 1/3.0, 1/2.0, 1.0
a21, a31, a32, a41, a42, a43, a51, a52, a53, a54 = 1/3.0, 1/6.0, 1/6.0, 1/8.0, 0.0, 3/8.0, 1/2.0, 0.0, -3/2.0, 2.0
x_k = x_0
y_k = y_0
yield (x_k, y_k)
for k in counter:
k1 = f(x_k, y_k)
k2 = f(x_k + c2 * step, y_k + a21 * step * k1)
k3 = f(x_k + c3 * step, y_k + a31 * step * k1 + a32 * step * k2)
k4 = f(x_k + c4 * step, y_k + a41 * step * k1 + a42 * step * k2 + a43 * step * k3)
k5 = f(x_k + c5 * step, y_k + a51 * step * k1 + a52 * step * k2 + a53 * step * k3 + a54 * step * k4)
y_k = y_k + step * (b1 * k1 + b2 * k2 + b3 * k3 + b4 * k4 + b5 * k5)
x_k = x_k + step
yield (x_k, y_k)
def question2(t, u_k):
return np.array([(4/5.0) * u_k[0] * u_k[1] - (1/4.0) * u_k[0], -(4/5.0) * u_k[0] * u_k[1]])
results = rk4_modified(x_0=0.0, y_0=np.array([0.005, 0.995]), f=question2, step=0.0125, k_max=800)
t, i_s = extract(results)
i, s = extract(i_s)
i, s = np.array(i), np.array(s)
df5 = pd.DataFrame({"t": t, "I": i, "S": s, "R": (1 - (i + s))})
df5 = df5[["t", "I", "S", "R"]]
print("Ratio I(10)/R(10) is {:.2f}.".format(df5["I"].iloc[-1]/df5["R"].iloc[-1]))
fig, ax = plt.subplots(figsize=(13, 8))
plt.plot(df5['t'], df5['I'],
label="$I(t)$: infected", color='blue')
plt.plot(df5['t'], df5['S'],
label="$S(t)$: non-infected", color='green')
plt.plot(df5['t'], df5['R'],
label="$R(t)$: recovered", color='red')
plt.legend(loc='upper right', fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True)
ax.set(title=r"Epidemic evolution: Kermack–McKendrick SIR model", xlabel="t", ylabel="y");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Environment
Step3: Try out Environment
Step4: Baseline
Step5: Train model
Step6: Visualizing Results
Step7: Enjoy model
Step8: Evaluation
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/openai/baselines >/dev/null
!pip install gym >/dev/null
import numpy as np
import random
import gym
from gym.utils import seeding
from gym import spaces
def state_name_to_int(state):
state_name_map = {
'S': 0,
'A': 1,
'B': 2,
'C': 3,
'D': 4,
'E': 5,
'F': 6,
'G': 7,
'H': 8,
'K': 9,
'L': 10,
'M': 11,
'N': 12,
'O': 13
}
return state_name_map[state]
def int_to_state_name(state_as_int):
state_map = {
0: 'S',
1: 'A',
2: 'B',
3: 'C',
4: 'D',
5: 'E',
6: 'F',
7: 'G',
8: 'H',
9: 'K',
10: 'L',
11: 'M',
12: 'N',
13: 'O'
}
return state_map[state_as_int]
class BeraterEnv(gym.Env):
"""The Berater Problem
Actions:
There are 4 discrete deterministic actions, each choosing one direction
"""
metadata = {'render.modes': ['ansi']}
showStep = False
showDone = True
envEpisodeModulo = 100
def __init__(self):
# self.map = {
# 'S': [('A', 100), ('B', 400), ('C', 200 )],
# 'A': [('B', 250), ('C', 400), ('S', 100 )],
# 'B': [('A', 250), ('C', 250), ('S', 400 )],
# 'C': [('A', 400), ('B', 250), ('S', 200 )]
# }
self.map = {
'S': [('A', 300), ('B', 100), ('C', 200 )],
'A': [('S', 300), ('B', 100), ('E', 100 ), ('D', 100 )],
'B': [('S', 100), ('A', 100), ('C', 50 ), ('K', 200 )],
'C': [('S', 200), ('B', 50), ('M', 100 ), ('L', 200 )],
'D': [('A', 100), ('F', 50)],
'E': [('A', 100), ('F', 100), ('H', 100)],
'F': [('D', 50), ('E', 100), ('G', 200)],
'G': [('F', 200), ('O', 300)],
'H': [('E', 100), ('K', 300)],
'K': [('B', 200), ('H', 300)],
'L': [('C', 200), ('M', 50)],
'M': [('C', 100), ('L', 50), ('N', 100)],
'N': [('M', 100), ('O', 100)],
'O': [('N', 100), ('G', 300)]
}
max_paths = 4
self.action_space = spaces.Discrete(max_paths)
positions = len(self.map)
# observations: position, reward of all 4 local paths, rest reward of all locations
# non existing path is -1000 and no position change
# look at what #getObservation returns if you are confused
low = np.append(np.append([0], np.full(max_paths, -1000)), np.full(positions, 0))
high = np.append(np.append([positions - 1], np.full(max_paths, 1000)), np.full(positions, 1000))
self.observation_space = spaces.Box(low=low,
high=high,
dtype=np.float32)
self.reward_range = (-1, 1)
self.totalReward = 0
self.stepCount = 0
self.isDone = False
self.envReward = 0
self.envEpisodeCount = 0
self.envStepCount = 0
self.reset()
self.optimum = self.calculate_customers_reward()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def iterate_path(self, state, action):
paths = self.map[state]
if action < len(paths):
return paths[action]
else:
# sorry, no such action, stay where you are and pay a high penalty
return (state, 1000)
def step(self, action):
destination, cost = self.iterate_path(self.state, action)
lastState = self.state
customerReward = self.customer_reward[destination]
reward = (customerReward - cost) / self.optimum
self.state = destination
self.customer_visited(destination)
done = destination == 'S' and self.all_customers_visited()
stateAsInt = state_name_to_int(self.state)
self.totalReward += reward
self.stepCount += 1
self.envReward += reward
self.envStepCount += 1
if self.showStep:
print( "Episode: " + ("%4.0f " % self.envEpisodeCount) +
" Step: " + ("%4.0f " % self.stepCount) +
lastState + ' --' + str(action) + '-> ' + self.state +
' R=' + ("% 2.2f" % reward) + ' totalR=' + ("% 3.2f" % self.totalReward) +
' cost=' + ("%4.0f" % cost) + ' customerR=' + ("%4.0f" % customerReward) + ' optimum=' + ("%4.0f" % self.optimum)
)
if done and not self.isDone:
self.envEpisodeCount += 1
if BeraterEnv.showDone:
episodes = BeraterEnv.envEpisodeModulo
if (self.envEpisodeCount % BeraterEnv.envEpisodeModulo != 0):
episodes = self.envEpisodeCount % BeraterEnv.envEpisodeModulo
print( "Done: " +
("episodes=%6.0f " % self.envEpisodeCount) +
("avgSteps=%6.2f " % (self.envStepCount/episodes)) +
("avgTotalReward=% 3.2f" % (self.envReward/episodes) )
)
if (self.envEpisodeCount%BeraterEnv.envEpisodeModulo) == 0:
self.envReward = 0
self.envStepCount = 0
self.isDone = done
observation = self.getObservation(stateAsInt)
info = {"from": self.state, "to": destination}
return observation, reward, done, info
def getObservation(self, position):
result = np.array([ position,
self.getPathObservation(position, 0),
self.getPathObservation(position, 1),
self.getPathObservation(position, 2),
self.getPathObservation(position, 3)
],
dtype=np.float32)
all_rest_rewards = list(self.customer_reward.values())
result = np.append(result, all_rest_rewards)
return result
def getPathObservation(self, position, path):
source = int_to_state_name(position)
paths = self.map[self.state]
if path < len(paths):
target, cost = paths[path]
reward = self.customer_reward[target]
result = reward - cost
else:
result = -1000
return result
def customer_visited(self, customer):
self.customer_reward[customer] = 0
def all_customers_visited(self):
return self.calculate_customers_reward() == 0
def calculate_customers_reward(self):
sum = 0
for value in self.customer_reward.values():
sum += value
return sum
def modulate_reward(self):
number_of_customers = len(self.map) - 1
number_per_consultant = int(number_of_customers/2)
# number_per_consultant = int(number_of_customers/1.5)
self.customer_reward = {
'S': 0
}
for customer_nr in range(1, number_of_customers + 1):
self.customer_reward[int_to_state_name(customer_nr)] = 0
# every consultant only visits a few random customers
samples = random.sample(range(1, number_of_customers + 1), k=number_per_consultant)
key_list = list(self.customer_reward.keys())
for sample in samples:
self.customer_reward[key_list[sample]] = 1000
def reset(self):
self.totalReward = 0
self.stepCount = 0
self.isDone = False
self.modulate_reward()
self.state = 'S'
return self.getObservation(state_name_to_int(self.state))
def render(self):
print(self.customer_reward)
env = BeraterEnv()
print(env.reset())
print(env.customer_reward)
BeraterEnv.showStep = True
BeraterEnv.showDone = True
env = BeraterEnv()
print(env)
observation = env.reset()
print(observation)
for t in range(1000):
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
env.close()
print(observation)
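# Illustrative helper (added for clarity): the observation vector built by getObservation is
# [position, 4 path scores (customer reward - cost, or -1000 for a missing path), remaining reward per location]
def describe_observation(obs):
    position = int_to_state_name(int(obs[0]))
    path_scores = obs[1:5]
    remaining_rewards = obs[5:]
    return position, path_scores, remaining_rewards
print(describe_observation(observation))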
from copy import deepcopy
import json
class Baseline():
def __init__(self, env, verbose=1):
self.env = env
self.verbose = verbose
self.reset()
def reset(self):
self.map = self.env.map
self.rewards = self.env.customer_reward.copy()
def as_string(self, state):
# reward/cost does not hurt, but is redundant; keeping the path would obscure that two states are the same
new_state = {
'rewards': state['rewards'],
'position': state['position']
}
return json.dumps(new_state, sort_keys=True)
def is_goal(self, state):
if state['position'] != 'S': return False
for reward in state['rewards'].values():
if reward != 0: return False
return True
def expand(self, state):
states = []
for position, cost in self.map[state['position']]:
new_state = deepcopy(state)
new_state['position'] = position
new_state['rewards'][position] = 0
reward = state['rewards'][position]
new_state['reward'] += reward
new_state['cost'] += cost
new_state['path'].append(position)
states.append(new_state)
return states
def search(self, root, max_depth = 25):
closed = set()
open = [root]
while open:
state = open.pop(0)
if self.as_string(state) in closed: continue
closed.add(self.as_string(state))
depth = len(state['path'])
if depth > max_depth:
if self.verbose > 0:
print("Visited:", len(closed))
print("Reached max depth, without reaching goal")
return None
if self.is_goal(state):
scaled_reward = (state['reward'] - state['cost']) / 6000
state['scaled_reward'] = scaled_reward
if self.verbose > 0:
print("Scaled reward:", scaled_reward)
print("Perfect path", state['path'])
return state
expanded = self.expand(state)
open += expanded
# make this best first
open.sort(key=lambda state: state['cost'])
def find_optimum(self):
initial_state = {
'rewards': self.rewards.copy(),
'position': 'S',
'reward': 0,
'cost': 0,
'path': ['S']
}
return self.search(initial_state)
def benchmark(self, model, sample_runs=100):
self.verbose = 0
BeraterEnv.showStep = False
BeraterEnv.showDone = False
perfect_rewards = []
model_rewards = []
for run in range(sample_runs):
observation = self.env.reset()
self.reset()
optimum_state = self.find_optimum()
perfect_rewards.append(optimum_state['scaled_reward'])
state = np.zeros((1, 2*128))
dones = np.zeros((1))
for t in range(1000):
actions, _, state, _ = model.step(observation, S=state, M=dones)
observation, reward, done, info = self.env.step(actions[0])
if done:
break
model_rewards.append(env.totalReward)
return perfect_rewards, model_rewards
def score(self, model, sample_runs=100):
perfect_rewards, model_rewards = self.benchmark(model, sample_runs=sample_runs)
perfect_score_mean, perfect_score_std = np.array(perfect_rewards).mean(), np.array(perfect_rewards).std()
test_score_mean, test_score_std = np.array(model_rewards).mean(), np.array(model_rewards).std()
return perfect_score_mean, perfect_score_std, test_score_mean, test_score_std
!rm -r logs
!mkdir logs
!mkdir logs/berater
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
%%time
# https://github.com/openai/baselines/blob/master/baselines/deepq/experiments/train_pong.py
# log_dir = logger.get_dir()
log_dir = '/content/logs/berater/'
import gym
from baselines import bench
from baselines import logger
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.vec_monitor import VecMonitor
from baselines.ppo2 import ppo2
from baselines.a2c import a2c
BeraterEnv.showStep = False
BeraterEnv.showDone = False
env = BeraterEnv()
wrapped_env = DummyVecEnv([lambda: BeraterEnv()])
monitored_env = VecMonitor(wrapped_env, log_dir)
# https://github.com/openai/baselines/blob/master/baselines/ppo2/ppo2.py
# https://github.com/openai/baselines/blob/master/baselines/common/models.py#L30
# https://arxiv.org/abs/1607.06450 for layer_norm
# lr linear from lr=1e-2 to lr=1e-4 (default lr=3e-4)
def lr_range(frac):
# we get the remaining updates between 1 and 0
start_lr = 1e-2
end_lr = 1e-4
diff_lr = start_lr - end_lr
lr = end_lr + diff_lr * frac
return lr
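# Note (added): lr_range(1.0) -> 1e-2 at the start of training and lr_range(0.0) -> 1e-4 at the end.
# As written, lr_range is not passed to a2c.learn below, so the learner's default learning rate is used.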
def mlp(num_layers=2, num_hidden=64, activation=tf.nn.relu, layer_norm=False):
def network_fn(X):
h = tf.layers.flatten(X)
for i in range(num_layers):
h = tf.layers.dense(h, units=num_hidden, kernel_initializer=tf.initializers.glorot_uniform(seed=17))
if layer_norm:
h = tf.contrib.layers.layer_norm(h, center=True, scale=True)
h = activation(h)
return h
return network_fn
network = mlp(num_hidden=500, num_layers=3, layer_norm=True)
# Parameters
# https://github.com/openai/baselines/blob/master/baselines/a2c/a2c.py
model = a2c.learn(
env=monitored_env,
network=network,
gamma=1.0,
ent_coef=0.05,
log_interval=50000,
total_timesteps=1000000)
# model.save('berater-ppo-v12.pkl')
monitored_env.close()
# !ls -l $log_dir
from baselines.common import plot_util as pu
results = pu.load_results(log_dir)
import matplotlib.pyplot as plt
import numpy as np
r = results[0]
plt.ylim(0, .75)
# plt.plot(np.cumsum(r.monitor.l), r.monitor.r)
plt.plot(np.cumsum(r.monitor.l), pu.smooth(r.monitor.r, radius=100))
import numpy as np
observation = env.reset()
env.render()
baseline = Baseline(env)
state = np.zeros((1, 2*128))
dones = np.zeros((1))
BeraterEnv.showStep = True
BeraterEnv.showDone = False
for t in range(1000):
actions, _, state, _ = model.step(observation, S=state, M=dones)
observation, reward, done, info = env.step(actions[0])
if done:
print("Episode finished after {} timesteps, reward={}".format(t+1, env.totalReward))
break
env.close()
%time baseline.find_optimum()
baseline = Baseline(env)
perfect_score_mean, perfect_score_std, test_score_mean, test_score_std = baseline.score(model, sample_runs=100)
# perfect scores
perfect_score_mean, perfect_score_std
# test scores for our model
test_score_mean, test_score_std
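# Illustrative summary (added): how close does the learned policy get to the search-based optimum?
print("Learned policy reaches {:.1%} of the optimal mean reward".format(test_score_mean / perfect_score_mean))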
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Indefinite integrals
Step2: Integral 1
Step3: Integral 2
Step4: Integral 3
Step5: Integral 4
Step6: Integral 5
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import integrate
import math as m
def integrand(x, a):
return 1.0/(x**2 + a**2)
def integral_approx(a):
# Use the args keyword argument to feed extra arguments to your integrand
I, e = integrate.quad(integrand, 0, np.inf, args=(a,))
return I
def integral_exact(a):
return 0.5*np.pi/a
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
# YOUR CODE HERE
# raise NotImplementedError()
def integrand(x):
return (np.sin(x))**2
def integral_approx(a):
I, err = integrate.quad(integrand, 0, np.pi)
return I
def integral_exact(a):
return np.pi/4
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
# YOUR CODE HERE
def integrand(x, p):
return ((np.sin(p*x))**2)/x
def integral_approx(p):
# Use the args keyword argument to feed extra arguments to your integrand
I, e = integrate.quad(integrand, 0, np.inf, args=(p,))
return I
def integral_exact(p):
return 0.5*np.pi*p/2
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
# YOUR CODE HERE
def integrand(x, p):
return (1.0-np.cos(p*x))/x
def integral_approx(p):
# Use the args keyword argument to feed extra arguments to your integrand
I, e = integrate.quad(integrand, 0, np.inf, args=(p,))
return I
def integral_exact(p):
return 0.5*np.pi*p/2
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
# YOUR CODE HERE
def integrand(x):
return x/(np.exp(x)-1)
def integral_approx(a):
I, err = integrate.quad(integrand, 0, np.inf)
return I
def integral_exact(a):
return np.pi**2/6
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
# YOUR CODE HERE
# YOUR CODE HERE
def integrand(x):
return x/(np.exp(x)+1)
def integral_approx(a):
I, err = integrate.quad(integrand, 0, np.inf)
return I
def integral_exact(a):
return np.pi**2/12
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Observations
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import collections
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
# import seaborn as sns
# sns.set_style("whitegrid", {'axes.grid' : False})
train_categorical_iter=pd.read_csv("../data/train_categorical.csv",chunksize=100000, dtype=str,usecols=list(range(1,2141)))
train_categorical_data = collections.defaultdict(set)
for chunk in train_categorical_iter:
for col in chunk:
train_categorical_data[col] = train_categorical_data[col].union(chunk[col][chunk[col].notnull()].unique())
print ("Number of Features " +str(len(train_categorical_data.keys())))
countEmpty = countSingle = countMultiple = 0
for key in train_categorical_data.keys():
if len(train_categorical_data[key])==0:
countEmpty+=1
continue
if len(train_categorical_data[key])==1:
countSingle+=1
continue
countMultiple+=1
objects = ('Missing Value', 'Single Value', 'Multiple Values')
y_pos = np.arange(3)
plt.bar([0,1,2],[countEmpty, countSingle, countMultiple], align='center', color='r')
plt.xticks([0,1,2], objects)
plt.ylabel('Frequency')
plt.title('Features in Training Data (Categorical)')
plt.show()
def plotFeatures(filename, xtitle, key):
if "numeric" in filename:
data = pd.read_csv(filename, nrows=1).drop(["Response", "Id"], axis=1).columns.values
else:
data = pd.read_csv(filename, nrows=1).drop(["Id"], axis=1).columns.values
features = {}
lines = set([dataPoint.split('_')[key] for dataPoint in data])
for l in lines:
features[l] = [item for item in data if l+'_' in item]
xvalue = "Station Name"
yvalue = "Number of Features"
if key==0:
xvalue = "Line Number"
df = pd.DataFrame(list({int(key[1:]): len(features[key]) for
key in features.keys()}.items()),
columns=[xvalue, yvalue])
stations_plot = df.plot(x=xvalue, y=yvalue, kind="bar",
title=xtitle,
color='blue')
plotFeatures("../data/train_numeric.csv", "Number of Numerical Features at a Station",1)
plotFeatures("../data/train_date.csv", "Number of Date Features at a Station",1)
plotFeatures("../data/train_categorical.csv", "Number of Categorical Features at a Station",1)
plotFeatures("../data/train_numeric.csv", "Number of Numerical Features at a Line",0)
plotFeatures("../data/train_date.csv", "Number of Date Features at a Line",0)
plotFeatures("../data/train_categorical.csv", "Number of Categorical Features at a Line",0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nerc', 'ukesm1-0-ll', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q-learned trading performance
Step2: Random trading performance
Step3: alpha & gamma
|
<ASSISTANT_TASK:>
Python Code:
fig = plt.figure(figsize=(12,4))
df = pd.read_csv('rtntop.csv').set_index('date')
cols = [c for c in df.columns if 'rtn' in c]
for i, c in enumerate(cols):
ax = plt.subplot(130+(1+i))
df[['bhreturn',c]].plot(ax=ax)
ax.legend().set_visible(False)
ax.set_ylabel('Return').set_visible(False if i else True)
ax.set_xlabel('Date')
plt.xticks(rotation=25,size='x-small')
fig.suptitle('SPY Learned (green) vs. Buy & Hold', fontsize=14)
plt.gcf().subplots_adjust(bottom=0.15)
fig.savefig('topSPY.png', bbox_inches='tight')
df = pd.read_csv('return.csv')
fig = plt.figure(figsize=(9,3))
ax = plt.subplot(111)
df.returns.hist(bins=1000, label='foo')
ax.set_xlim([-2,4])
ax.set_xlabel('Total Return')
ax.set_title('Q-learned trading performance (500 unique stocks)')
mean, std = df.returns.describe()[['mean','std']]
ax.annotate('mean={mean:0.6f}\nstd={std:0.6f}'.format(mean=mean, std=std),
xy=(0, 0), xytext=(.85,.85), textcoords='axes fraction', size='small')
fig.savefig('return.png', bbox_inches='tight')
df = pd.read_csv('returnrand.csv')
fig = plt.figure(figsize=(9,3))
ax = plt.subplot(111)
df.returns.hist(bins=10000, label='foo')
ax.set_xlim([-9,11])
ax.set_xlabel('Total Return')
ax.set_title('Random trading performance (500 unique stocks)')
mean, std = df.returns.describe()[['mean','std']]
ax.annotate('mean={mean:0.6f}\nstd={std:0.6f}'.format(mean=mean, std=std),
xy=(0, 0), xytext=(.85,.85), textcoords='axes fraction', size='small')
fig.savefig('returnrand.png', bbox_inches='tight')
df = pd.read_csv('return.csv')
def _plot(df, var):
series = df.groupby(var).returns
mean = series.mean()
# use log10 to diminish effect on plot
err = np.log10(series.std())
fig = plt.figure(figsize=(4,3))
ax = plt.subplot(111)
mean.plot(ax=ax)
ax.set_ylabel('Total Return')
plt.fill_between(mean.index, mean+err, mean-err, edgecolor='b', alpha=.2)
fig.savefig('{}.png'.format(var), bbox_inches='tight')
_plot(df, 'alpha')
_plot(df, 'gamma')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Indexing strings
Step2: This is very close to what we did last week when we looked at for statements
Step3: Question - How would you get the last element of the list?
Step4: Formatting strings
Step5: Question - Write some code which asks a user for their birthday day, month and year separately and print the result formatted together
Step6: Escape characters
Step7: String methods
Step8: Triple quotes and multi line strings
Step9: A note on encoding
Step10: Reading files
Step11: Note
Step12: Writing files
Step13: Closing files
Step14: Context managers
Step15: We've breezed over File IO for now but if you want to look more in detail the TutorialsPoint page in this subject can give you more guidance if you are lost.
Step16: Indexing a list (it's just like indexing a string!)
Step17: Iterating over a list
Step18: Modifying a list
Step19: You can also delete elements and add to the end
Step20: Problem - Print the list in reverse order using a for loop
Step21: List comprehensions
Step22: This is a small example where the for loop code is only two lines so the advantage is small. But you can actually do all of this in 1 line!
Step23: This is the basic version of the comprehension. You can also add conditions, like to only keep Celsius temperatures above 37
Step24: If we wanted to do this with loops we would have a for loop and an if statement. Now we have neither!
Step25: Challenge - Write a comprehension to create a list of the first 10 square numbers?
|
<ASSISTANT_TASK:>
Python Code:
# Everyone should know how to create (or "declare") a string by now
var = 'This is a string'
alphabet = 'abcdefghijklmnopqrstuvwxyz'
# We can get only the first element of the alphabet
# Note that a is the 0th character in the string
first_letter = alphabet[0]
print(first_letter)
# To get the second letter we would do this
second_letter = alphabet[1]
print(second_letter)
# We can also get a range of characters in the string
first_five_letters = alphabet[0:5]
print(first_five_letters)
# You can see that when we run the for loop, Python looks at the indexes of the letters
# in the string to iterate over
for letter in alphabet:
print(alphabet.index(letter))
print(letter)
print()
# How can we get the z from the alphabet variable?
print(alphabet[])
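# One way (shown as a hint): negative indices count from the end of the string
# print(alphabet[-1]) # -> 'z'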
# This is how you read input from a user
name = input('Input name: ')
# Now use the {} to leave some parts of the string blank and fill them in later
var = 'Hello! My name is {}'.format(name)
print(var)
# TO DO - Make 3 input requests to the user and format the result together as 1 string
# END TO DO
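# Example solution sketch (added for illustration; prompts and variable names are arbitrary)
day = input('Birthday day: ')
month = input('Birthday month: ')
year = input('Birthday year: ')
print('Your birthday is {}/{}/{}'.format(day, month, year))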
n_string = 'This\nis\na\nmulti\nline\nstring\n'
print(n_string)
t_string = 'This\tstring\thas\ttab\tspaces\n'
print(t_string)
q_string = '\"This string is inside quotes\"'
print(q_string)
string1 = 'Hello'
string2 = 'Python'
# Add two strings together
print(string1 + string2)
# Another way to do this is using .join
print(' '.join([string1,string2]))
# Repetition using the * multiplication operator
print(string1*3)
# Remember the membership `in` keyword
print('o' in string1)
# You can make all characters uppercase
print(string1.upper())
# Or all lowercase
print(string2.lower())
# You can also search for patterns and replace characters
string3 = 'I\'m really more of a cat person'
print(string3.replace('cat','dog'))
# You can check if a string of a number is a number or not
print('12345'.isdigit())
# Or check if a string is only made of alphabet characters
print('alphabet'.isalpha())
# Get the length of a string
print(len(alphabet))
# To make big strings you use three quote marks
big_string = '''Mr. and Mrs. Dursley, of number four, Privet Drive, were proud to say
that they were perfectly normal, thank you very much. They were the last
people you'd expect to be involved in anything strange or mysterious,
because they just didn't hold with such nonsense.'''
# And Python will join strings declared over multiple lines together
multi_line_string = 'Hello my name is Tom ' \
'my favourite colour is Blue ' \
'and I live in Earl\'s Court'
print(multi_line_string)
# NOTE: This code won't work unless you make a file called `test_data.txt`
# in the same folder as this lesson
file = open('test_data.txt','r')
# The file is opened and has a "handle" which is the file variable for interaction
type(file)
file.read()
file.readline()
file = open('test_data.txt','r') # Reopen to set the file handle to the beginning of the text
# This loop calls .readline() automatically
for line in file:
print(line)
# You use open() to create a new file even if it did not already exist
file2 = open('write_data.txt','w+')
file2.write('this is a text file\n')
file2.write('test file for a python class\n')
# The number of chars written to the file is the return value
# Since we used w+ we can read the file to check what we created
for line in file2:
print(line)
# This is important for file security so nothing is corrupted
file.close()
file2.close()
# We could have read the file using a context manager like this
with open('test_data.txt','r') as file:
# In this indented code block the file is open
for line in file:
print(line)
# No need to close the file! The context manager closes it automatically when the block ends
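# The same pattern works for writing (illustrative sketch): append a line to the file created earlier
with open('write_data.txt', 'a') as file2:
    file2.write('appending a line from inside a context manager\n')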
# Declaring a list
list1 = [1,2,5,5,6]
# A list can have different types inside each element
list2 = list(('hello',4e-6,45))
print(list1)
print()
print(list2)
print(list1[0])
print(list2[1:3])
# Exactly the same as reading a file or printing all the chars in a string
for elem in list2:
print(elem)
print(list1)
list1[-1] = 5000 # Use the -1 indexing the same as strings to get the end element
print(list1)
# Remove the end item
del list1[-1]
print(list1)
# Add a new element to the end
list1.append(6)
print(list1)
# You can also use len() to get the size of a list
print(len(list1))
# TO DO - Print this list in reverse order
# END TO DO
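# Example solution sketch (one possible approach): slice with a step of -1 to walk the list backwards
for elem in list1[::-1]:
    print(elem)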
celsius = [39.2, 36.5, 37.3, 37.8]
print(celsius)
fahrenheit = [] #Declaring an empty list
for temp in celsius:
fahrenheit.append(float(9)/5*temp + 32)
print(fahrenheit)
# The version using a list comprehension
fahrenheit2 = [float(9)/5*temp + 32 for temp in celsius]
print(fahrenheit2)
fahrenheit3 = [float(9)/5*temp + 32 for temp in celsius if temp > 37]
print(fahrenheit3)
ones = [1,2,3,4,5]
tens = [10,20,30,40,50]
# The zip function puts each indexed element in pairs, like the teeth of a zip being locked together.
mult = [i*j for i,j in zip(ones,tens)]
print(mult)
# TODO
# END TODO
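# Example solution sketch: a comprehension for the first 10 square numbers
squares = [i**2 for i in range(1, 11)]
print(squares)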
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Non-vectorized implementation
Step2: Example 2
Step3: Example 3
|
<ASSISTANT_TASK:>
Python Code:
def diferencia_atras(f, x_0, x_1):
pendiente = (f(x_0) - f(x_1))/(x_0 - x_1)
return pendiente
def raiz(f, a, b):
c = b - f(b)/diferencia_atras(f, a, b)
return b, c
def secante(f, x_0, x_1):
print("{0:s} \t {1:15s} \t {2:15s} \t {3:15s}".format('i', 'x anterior', 'x actual', 'error relativo %'))
x_anterior = x_0
x_actual = x_1
i = 0
print("{0:d} \t {1:.15f} \t {2:.15f} \t {3:15s}".format(i, x_anterior, x_actual, '???????????????'))
error_permitido = 0.000001
while True:
x_anterior, x_actual = raiz(f, x_anterior, x_actual)
if x_actual != 0:
error_relativo = abs((x_actual - x_anterior)/x_actual)*100
i = i + 1
print("{0:d} \t {1:.15f} \t {2:.15f} \t {3:15.11f}".format(i, x_anterior, x_actual, error_relativo))
if (error_relativo < error_permitido) or (i>=20):
break
print('\nx =', x_actual)
def f(x):
# f(x) = x^5 + x^3 + 3
y = x**5 + x**3 + 3
return y
diferencia_atras(f, 0, -1)
raiz(f, 0, -1)
secante(f, 0, -1)
secante(f, 0, -0.5)
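# Optional cross-check (illustrative sketch; assumes scipy is available in this environment):
# from scipy.optimize import brentq
# print(brentq(f, -2.0, 0.0)) # should agree with the secant result above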
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.
Step2: We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.
Step3: Training
Step4: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss.
Step5: Checking out the results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
# Size of the encoding layer (the hidden layer)
encoding_dim = 32 # feel free to change this value
# One possible completion of the exercise placeholders (assumes the TF1.x API used elsewhere in this notebook)
# Input and target placeholders (28*28 = 784 flattened pixels)
inputs_ = tf.placeholder(tf.float32, (None, 784), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 784), name='targets')
# Output of hidden layer
encoded = tf.layers.dense(inputs_, encoding_dim, activation=tf.nn.relu)
# Output layer logits
logits = tf.layers.dense(encoded, 784, activation=None)
# Sigmoid output from logits
decoded = tf.nn.sigmoid(logits, name='output')
# Sigmoid cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Mean of the loss
cost = tf.reduce_mean(loss)
# Adam optimizer
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
# Create the session
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
feed = {inputs_: batch[0], targets_: batch[0]}
batch_cost, _ = sess.run([cost, opt], feed_dict=feed)
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data can be easily exported to a csv file.
Step2: We can also load the data into an IntData object.
Step3: IntData objects
Step4: Basic data outputs for visualization or analysis
|
<ASSISTANT_TASK:>
Python Code:
# Importing pergola modules used
import sys
# We need to set the path to run this notebook directly from ipython notebook
my_path_to_modules = "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/"
sys.path.append(my_path_to_modules)
from pergola import jaaba_parsers
input_jaaba_file="../../sample_data/jaaba_example/scores_chase.mat"
path_csv="../../test"
jaaba_parsers.jaaba_scores_to_csv(input_file=input_jaaba_file, path_w=path_csv, norm=True, data_type="chase")
map_file_jaaba = "../../sample_data/jaaba_example/jaaba2pergola.txt"
int_data_jaaba = jaaba_parsers.jaaba_scores_to_intData(input_file=input_jaaba_file, map_jaaba = map_file_jaaba, norm=True, data_type="chase")
print int_data_jaaba.data_types
print int_data_jaaba.min
print int_data_jaaba.max
print int_data_jaaba.tracks
print int_data_jaaba.data[:10]
#reading the data with your desired options
read_data_jaaba = int_data_jaaba.read(relative_coord=True, fields2rel=None, multiply_t=1)
#You can set several options when transforming the data
#data_types selects the data_types you want to transform
#dataTypes_actions: when there are multiple dataTypes you can join them in the same track ("all") or keep them separated
#data_type_col sets the color displayed by the genome browser for each dataType when the bed file is loaded
data_type_col={'chase': 'blue'}
#range_color sets the data range that is mapped to the minimum and maximum colour intensity
range_color=[-1, 1]
bed_jaaba = read_data_jaaba.convert(mode="bed", data_types=["chase"], dataTypes_actions="all",
color_restrictions=data_type_col, range_color=range_color)
for key in bed_jaaba:
bedSingle = bed_jaaba[key]
bedSingle.save_track(path="/Users/jespinosa/git/pergola/test")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'ukesm1-0-mmh', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can get the data via
Step2: Look at dem der data
Step3: Fully Connected Model
Step4: From this notebook by K.Turgutlu.
Step5: The goal is to basically replicate the basic architecture of a ResNet. The simple model above gets an accuracy of around 47%, with 120,000 parameters. Not great. We're definitely not using our parameters very well -- they're treating each pixel with a different weight.
Step6: nn.Conv2d(layers[i], layers[i + 1], kernel_size=3, stride=2)
Step7: learn = ConvLearner.from_model_data(ConvNet([3, 20, 40, 80], 10), data)
Step8: To turn the output of the ConvNet into a prediction of one of ten classes we use Adaptive Max Pooling, which is now standard for SotA algorithms. A Max Pool is done on the very last layer. Instead of a 2x2, 3x3, or X-by-X window, in Adaptive Max Pooling we don't tell the algorithm how big an area to pool; instead we tell it how big a resolution to create.
Step9: We have around 30,000 parameters in the ConvNet, about a quarter of that in the simple FullNet, and our accuracy is around 57%, up from 47%.
Step10: What's awesome about PyTorch is that a Layer definition and a Neural Network definition are literally identical. They both have a Constructor, and a Forward. Any time you have a layer, you can use it as a neural net, and vice versa.
Step11: 5. BatchNorm
Step12: this is normalizing our input automatically per channel, and for later layers
Step13: NOTE
Step14: 6 Deep BatchNorm
Step15: Notice making the model deeper hasn't helped. It's possible to train a standard ConvNet 12 layers deep, but it's hard to do properly. Instead we're going to replace the ConvNet with a ResNet.
Step16: And now this model is going to train beautifully just because of that one line
Step17: The idea is: if we have some inputs coming in and a function trying to predict how much the error is, then we add on another prediction of the error at that new stage, and so on, so each time we're zooming in closer and closer to the correct answer -- i.e. each stage only has to fit the residual.
Step18: Other than the minor simplification of ResNet, this is a reasonable approximation of a good starting point for a modern architecture.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from fastai.conv_learner import *
PATH = "data/cifar10/"
os.makedirs(PATH, exist_ok=True)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))
def get_data(sz,bs):
tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomFlip()], pad=sz//8)
return ImageClassifierData.from_paths(PATH, val_name='test', tfms=tfms, bs=bs)
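# Note (assumed behaviour of the old fastai 0.7 transforms API): tfms_from_stats
# normalises with the channel stats above, and RandomFlip plus pad=sz//8 (4 pixels
# at 32x32) give the usual CIFAR-style flip-and-crop augmentation on the training set.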
bs=256
data = get_data(32, 4)
x,y = next(iter(data.trn_dl))
plt.imshow(data.trn_ds.denorm(x)[0]);
plt.imshow(data.trn_ds.denorm(x)[3]);
data = get_data(32,bs)
lr=1e-2
class SimpleNet(nn.Module):
def __init__(self, layers):
super().__init__()
self.layers = nn.ModuleList([
nn.Linear(layers[i], layers[i+1]) for i in range(len(layers) - 1)])
def forward(self, x):
x = x.view(x.size(0), -1)
for λ in self.layers:
λ_x = λ(x)
x = F.relu(λ_x)
return F.log_softmax(λ_x, dim=-1)
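# Quick parameter arithmetic for the fully connected model above:
# first Linear: 32*32*3 * 40 + 40 = 122,920; second Linear: 40 * 10 + 10 = 410;
# roughly the 120,000 parameters mentioned in the description.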
learn = ConvLearner.from_model_data(SimpleNet([32*32*3, 40, 10]), data)
learn, [o.numel() for o in learn.model.parameters()]
[o for o in learn.model.parameters()]
learn.summary()
learn.lr_find()
learn.sched.plot()
%time learn.fit(lr,2)
%time learn.fit(lr, 2, cycle_len=1)
class ConvNet(nn.Module):
def __init__(self, layers, c):
super().__init__()
self.layers = nn.ModuleList([
nn.Conv2d(layers[i], layers[i + 1], kernel_size=3, stride=2)
for i in range(len(layers) - 1)])
self.pool = nn.AdaptiveMaxPool2d(1)
self.out = nn.Linear(layers[-1], c)
def forward(self, x):
for λ in self.layers: x = F.relu(λ(x))
x = self.pool(x)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=-1)
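# Hedged illustration (not part of the original lesson): probe how the stride-2,
# no-padding convolutions shrink the spatial grid before the adaptive pooling step.
import torch
probe = torch.randn(1, 3, 32, 32)
for conv in ConvNet([3, 20, 40, 80], 10).layers:
    probe = F.relu(conv(probe))
    print(probe.shape)  # grid roughly halves each time: 15x15 -> 7x7 -> 3x3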
learn = ConvLearner.from_model_data(ConvNet([3, 20, 40, 80], 10), data)
learn.summary()
learn.lr_find(end_lr=100)
learn.sched.plot()
%time learn.fit(1e-1, 2)
%time learn.fit(1e-1, 4, cycle_len=1)
class ConvLayer(nn.Module):
def __init__(self, ni, nf):
super().__init__()
self.conv = nn.Conv2d(ni, nf, kernel_size=3, stride=2, padding=1)
def forward(self, x): return F.relu(self.conv(x))
class ConvNet2(nn.Module):
def __init__(self, layers, c):
super().__init__()
self.layers = nn.ModuleList([ConvLayer(layers[i], layers[i + 1])
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
def forward(self, x):
for λ in self.layers: x = λ(x)
x = F.adaptive_max_pool2d(x, 1) # F is nn.Functional
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=-1)
learn = ConvLearner.from_model_data(ConvNet2([3, 20, 40, 80], 10), data)
learn.summary()
%time learn.fit(1e-1, 2)
%time learn.fit(1e-1, 2, cycle_len=1)
class BnLayer(nn.Module):
def __init__(self, ni, nf, stride=2, kernel_size=3):
super().__init__()
self.conv = nn.Conv2d(ni, nf, kernel_size=kernel_size, stride=stride,
bias=False, padding=1)
self.a = nn.Parameter(torch.zeros(nf,1,1))
self.m = nn.Parameter(torch.ones(nf, 1,1))
def forward(self, x):
x = F.relu(self.conv(x))
x_chan = x.transpose(0,1).contiguous().view(x.size(1), -1)
if self.training: # true for training set. false for val set.
self.means = x_chan.mean(1)[:, None, None]
self.stds = x_chan.std(1)[:, None, None]
return (x-self.means) / self.stds * self.m + self.a
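# Hedged sanity check (illustrative, not from the lesson): in training mode the layer
# output should be roughly zero-mean, unit-std per channel, since self.a starts at 0
# and self.m at 1, so only the normalisation is visible at initialisation.
import torch
bn = BnLayer(3, 8)  # a fresh module defaults to training mode
out = bn(torch.randn(16, 3, 32, 32))
chan = out.transpose(0, 1).contiguous().view(8, -1)
print(chan.mean(1), chan.std(1))  # approximately zeros and ones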
class ConvBnNet(nn.Module):
def __init__(self, layers, c):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[i + 1])
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
def forward(self, x):
x = self.conv1(x)
for λ in self.layers: x = λ(x)
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=-1)
learn = ConvLearner.from_model_data(ConvBnNet([10, 20, 40, 80, 160], 10), data)
learn.summary()
%time learn.fit(3e-2, 2)
%time learn.fit(1e-1, 4, cycle_len=1)
t1 = [chr(ord('a')+i) for i in range(10)]
t2 = [chr(ord('ა')+i) for i in range(10)]
for a,b in zip(t1, t2):
print(a)
print(b)
class ConvBnNet2(nn.Module):
def __init__(self, layers, c):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])
for i in range(len(layers) - 1)])
self.layers2 = nn.ModuleList([BnLayer(layers[i+1], layers[i+1], 1)
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
def forward(self, x):
x = self.conv1(x)
for λ, λ2 in zip(self.layers, self.layers2):
x = λ(x)
x = λ2(x)
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=-1)
learn = ConvLearner.from_model_data(ConvBnNet2([10,20,40,80,160],10), data)
%time learn.fit(1e-2, 2)
%time learn.fit(1e-2, 2, cycle_len=1)
class ResnetLayer(BnLayer):
def forward(self, x): return x + super().forward(x)
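# The single line above is the whole trick: forward returns x + super().forward(x),
# so each ResnetLayer only has to learn a correction (the residual) on top of its input.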
class Resnet(nn.Module):
def __init__(self, layers, c):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])
for i in range(len(layers) - 1)])
self.layers2 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1)
for i in range(len(layers) - 1)])
self.layers3 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1)
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
def forward(self, x):
x = self.conv1(x)
for λ,λ2,λ3 in zip(self.layers, self.layers2, self.layers3):
x = λ3(λ2(λ(x))) # function of a function of a function
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), -1)
return F.log_softmax(self.out(x), dim=-1)
learn = ConvLearner.from_model_data(Resnet([10,20,40,80,160], 10), data)
wd=1e-5
%time learn.fit(1e-2, 2, wds=wd)
%time learn.fit(1e-2, 3, cycle_len=1, cycle_mult=2, wds=wd)
%time learn.fit(1e-2, 8, cycle_len=4, wds=wd)
class Resnet2(nn.Module):
def __init__(self, layers, c, p=0.5):
super().__init__()
self.conv1 = BnLayer(3, 16, stride=1, kernel_size=7)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])
for i in range(len(layers) - 1)])
self.layers2 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1)
for i in range(len(layers) - 1)])
self.layers3 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1)
for i in range(len(layers) - 1)])
self.out = nn.Linear(layers[-1], c)
self.drop = nn.Dropout(p) # dropout added
def forward(self, x):
x = self.conv1(x)
for λ,λ2,λ3 in zip(self.layers, self.layers2, self.layers3):
x = λ3(λ2(λ(x)))
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), -1)
x = self.drop(x)
return F.log_softmax(self.out(x), dim=-1)
# all sizes increased; 0.2 dropout
learn = ConvLearner.from_model_data(Resnet2([16, 32, 64, 128, 256], 10, 0.2), data)
wd=1e-6
%time learn.fit(1e-2, 2, wds=wd)
%time learn.fit(1e-2, 3, cycle_len=1, cycle_mult=2, wds=wd)
%time learn.fit(1e-2, 8, cycle_len=4, wds=wd)
learn.save('tmp')
log_preds,y = learn.TTA()
preds = np.mean(np.exp(log_preds), 0)
metrics.log_loss(y,preds), accuracy(preds,y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define SQL Query
Step2: Get Connection & Credentials
Step3: Running Querying
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import collections
import getpass
import json
import os
import yaml
import pandas
import pymysql
select_stmt_base = "select id, deactivated, group_id, created_at, notes from pids where created_at < '{}'"
if os.path.exists("../conf/db.yml"):
print("Using conf/db.yml for configuration")
db = yaml.safe_load(open("../conf/db.yml", "r"))
host = db["db_host"]
user = db["db_username"]
password = db["db_password"]
else:
print("Could not find db conf, asking user for credentials")
host = input("Host: ")
user = input("Username: ")
password = getpass.getpass("Password: ")
# create connection
conn = pymysql.connect(
host=host,
port=int(3306),
user=user,
passwd=password,
db="pid")
date = input("Enter a date (YYYY-MM-DD): ")
# run query to get dataframe
df = pandas.read_sql_query(select_stmt_base.format(date),
conn)
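# Aside (illustrative): because `date` is interpolated into the SQL text, a parameterised
# query is safer for untrusted input, e.g.
# pandas.read_sql_query("select ... where created_at < %s", conn, params=[date])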
filter_scp = df["group_id"]=="SCP"
filter_ucsd = df["group_id"]=="UCSD"
filter_scp_notes = df["notes"].str.lower().str.contains("scp")
filter_ucsd_notes = df["notes"].str.lower().str.contains("ucsd")
filter_de = df["deactivated"]==0
scp_all = df[(filter_scp)]['id'].count()
scp_de = df[(filter_scp) & (filter_de)]['id'].count()
ucsd_all = df[(filter_ucsd)]['id'].count()
ucsd_de = df[(filter_ucsd) & (filter_de)]['id'].count()
shared_de = df[((filter_ucsd & filter_scp_notes) | (filter_scp & filter_ucsd_notes)) & filter_de ]['id'].count()
shared_all = df[((filter_ucsd & filter_scp_notes) | (filter_scp & filter_ucsd_notes)) ]['id'].count()
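# "Shared" counts records assigned to one group whose notes mention the other group
# (UCSD rows with "scp" in the notes, or SCP rows with "ucsd" in the notes).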
print("SCP Total: {}".format(scp_all))
print("SCP Active: {}".format(scp_de))
print("UCSD Total: {}".format(ucsd_all))
print("UCSD Active: {}".format(ucsd_de))
print("Shared Total: {}".format(shared_all))
print("Shared Active: {}".format(shared_de))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we turn the separate coordinate arrays into triplets.
Step2: Zoom in a little
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
N = 30000
x = np.zeros(N)
y = np.zeros(N)
z = np.zeros(N)
x1 = np.empty_like(x)
y1 = np.empty_like(y)
z1 = np.empty_like(z)
# Sierpinski triangle iterative functions
def f1(x,y,z,x1,y1,z1,c):
x1[c] = 1.0/2.0*x[c]
y1[c] = 1.0/2.0*y[c]
z1[c] = 1.0/2.0*z[c]
def f2(x,y,z,x1,y1,z1,c):
x1[c] = 1.0/2.0*x[c] + 1/2.0
y1[c] = 1.0/2.0*y[c]
z1[c] = 1.0/2.0*z[c]
def f3(x,y,z,x1,y1,z1,c):
x1[c] = 1.0/2.0*x[c] + 1/4.
y1[c] = 1.0/2.0*y[c] + np.sqrt(3)/4
z1[c] = 1.0/2.0*z[c]
def f4(x,y,z,x1,y1,z1,c):
x1[c] = 1.0/2.0*x[c] + 1/4.
y1[c] = 1.0/2.0*y[c] + 1./4
z1[c] = 1.0/2.0*z[c] + np.sqrt(3)/4
functions = [f1, f2, f3, f4]
probabilities = [1/4.]*4
assert(len(functions) == len(probabilities))
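# Chaos-game iteration: at every step each point jumps halfway toward one of the four
# attractor vertices, chosen independently with the probabilities above.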
X,Y,Z = x,y,z
for it in range(20):
    # pick indices for each function to be applied
    r = np.random.choice(len(probabilities), size=N, p=probabilities)
    for i, f in enumerate(functions):
        f(x, y, z, x1, y1, z1, r == i)
    x, x1 = x1, x
    y, y1 = y1, y
    z, z1 = z1, z
    # accumulate points after the first (transient) iteration; the outer index is
    # named `it` so the inner loop variable `i` does not shadow it
    if it > 0:
        X, Y, Z = np.hstack([X, x]), np.hstack([Y, y]), np.hstack([Z, z])
# how much memory are we using, how many points there are
print(3*X.nbytes//1024**2,"MB",X.shape[0])
positions = np.vstack([X, Y, Z]).T.copy()
import k3d
plot = k3d.plot()
point_plot = k3d.points(positions.astype(np.float32), color=0xff0000, point_size=0.003, shader='3d')
plot += point_plot
plot.display()
plot.camera = [-0.59265772150826,
1.0966590944867525,
0.15381683182413644,
0.35173312413637553,
0.35752558265043016,
0.3151305910837551,
-0.5602813338387698,
-0.7160643522547137,
-0.41633720753942915]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unsupervised Clustering using K-Means
Step2: Supervised Classification using Decision Trees
Step3: Now, we use a DecisionTree to learn a model and test our result
Step4: pretty good, isn't it?
Step5: seems like versicolor and virginica are more similar to each other than to setosa
|
<ASSISTANT_TASK:>
Python Code:
measurements = [
{'city': 'Dubai', 'temperature': 33.},
{'city': 'London', 'temperature': 12.},
{'city': 'San Francisco', 'temperature': 18.},
]
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
tf_measurements = vec.fit_transform(measurements)
tf_measurements.toarray()
vec.get_feature_names()
#disable some annoying warning
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#load the iris datasets
import sklearn.datasets
data = sklearn.datasets.load_iris()
data.data.shape
from sklearn.cluster import KMeans
iris_pred = KMeans(n_clusters=3, random_state = 102).fit_predict(data.data)
plt.figure(figsize=(12, 12))
colors = sns.color_palette()
plt.subplot(211)
plt.scatter(data.data[:, 0], data.data[:, 1], c=[colors[i] for i in iris_pred], s=40)
plt.title('KMeans-3 clusterer')
plt.xlabel(data.feature_names[0])
plt.ylabel(data.feature_names[1])
plt.subplot(212)
plt.scatter(data.data[:, 0], data.data[:, 1], c=[colors[i] for i in data.target],s=40)
plt.title('Ground Truth')
plt.xlabel(data.feature_names[0])
plt.ylabel(data.feature_names[1])
import sklearn.cross_validation
data_train, data_test, target_train, target_test = sklearn.cross_validation.train_test_split(
data.data, data.target, test_size=0.20, random_state = 5)
print(data.data.shape, data_train.shape, data_test.shape)
from sklearn.tree import DecisionTreeClassifier
instance = DecisionTreeClassifier()
r = instance.fit(data_train, target_train)
target_predict = instance.predict(data_test)
from sklearn.metrics import accuracy_score
print('Prediction accuracy: ',accuracy_score(target_predict, target_test))
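# Hedged extra (illustrative): a confusion matrix shows which species get mixed up,
# which is more informative than the single accuracy number above.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(target_test, target_predict))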
from sklearn import manifold
#create mds instance
mds = manifold.MDS(n_components=2, random_state=5)
#fit the model and get the embedded coordinates
pos = mds.fit(data.data).embedding_
plt.scatter(pos[:, 0], pos[:, 1], s=20, c=[colors[i] for i in data.target])
#create a legend; since we just have one plot and not three, fake the legend using patches
import matplotlib.patches as mpatches
patches = [ mpatches.Patch(color=colors[i], label=data.target_names[i]) for i in range(3) ]
plt.legend(handles=patches)
plt.legend()
#compare with e.g. PCA
from sklearn import decomposition
pca = decomposition.PCA(n_components=2)
pca_pos = pca.fit(data.data).transform(data.data)
mds_pos = mds.fit(data.data).embedding_
plt.figure(figsize=[20,7])
plt.subplot(121)
plt.scatter(mds_pos[:, 0], mds_pos[:, 1], s=30, c=[colors[i] for i in data.target])
plt.title('MDS')
plt.subplot(122)
plt.scatter(pca_pos[:, 0], pca_pos[:, 1], s=30, c=[colors[i] for i in data.target])
plt.title('PCA')
from IPython.html.widgets import interact
colors = sns.color_palette(n_colors=10)
mds_pos = mds.fit(data.data).embedding_
@interact(n_clusters=(1,10))
def draw_plot(n_clusters):
instance = KMeans(n_clusters=n_clusters, random_state = 102)
clusters_assignment = instance.fit_predict(data.data)
plt.scatter(mds_pos[:, 0], mds_pos[:, 1], s=20, c=[colors[i] for i in clusters_assignment])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Kernel
Step3: Model fitting
Step4: Using our loss function defined above, we'll run a gradient based optimization routine from scipy (you could also use a jax-specific optimizer, but that's not necessary) to fit this model as follows
Step5: Warning
|
<ASSISTANT_TASK:>
Python Code:
try:
import tinygp
except ImportError:
!pip install -q tinygp
from jax.config import config
config.update("jax_enable_x64", True)
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.datasets import co2
data = co2.load_pandas().data
t = 2000 + (np.array(data.index.to_julian_date()) - 2451545.0) / 365.25
y = np.array(data.co2)
m = np.isfinite(t) & np.isfinite(y) & (t < 1996)
t, y = t[m][::4], y[m][::4]
plt.plot(t, y, ".k")
plt.xlim(t.min(), t.max())
plt.xlabel("year")
_ = plt.ylabel("CO$_2$ in ppm")
plt.savefig("gp-mauna-loa-data.pdf")
import jax
import jax.numpy as jnp
from tinygp import kernels, transforms, GaussianProcess
def build_gp(theta, X):
mean = theta[-1]
# We want most of out parameters to be positive so we take the `exp` here
# Note that we're using `jnp` instead of `np`
theta = jnp.exp(theta[:-1])
# Construct the kernel by multiplying and adding `Kernel` objects
k1 = theta[0] ** 2 * kernels.ExpSquared(theta[1])
k2 = theta[2] ** 2 * kernels.ExpSquared(theta[3]) * kernels.ExpSineSquared(period=theta[4], gamma=theta[5])
k3 = theta[6] ** 2 * kernels.RationalQuadratic(alpha=theta[7], scale=theta[8])
k4 = theta[9] ** 2 * kernels.ExpSquared(theta[10])
kernel = k1 + k2 + k3 + k4
return GaussianProcess(kernel, X, diag=theta[11] ** 2, mean=mean)
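# The kernel above mirrors Rasmussen & Williams' Mauna Loa example: k1 is the long-term
# smooth trend, k2 the quasi-periodic seasonal component, k3 medium-term irregularities
# (rational quadratic), and k4 a short-length-scale term, plus the white-noise diagonal.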
def neg_log_likelihood(theta, X, y):
gp = build_gp(theta, X)
return -gp.condition(y)
# Objective
obj = jax.jit(jax.value_and_grad(neg_log_likelihood))
# These are the parameters from R&W
mean_output = 340.0
theta_init = np.append(
np.log([66.0, 67.0, 2.4, 90.0, 1.0, 4.3, 0.66, 1.2, 0.78, 0.18, 1.6, 0.19]),
mean_output,
)
obj(theta_init, t, y)
from scipy.optimize import minimize
soln = minimize(obj, theta_init, jac=True, args=(t, y))
print(f"Final negative log likelihood: {soln.fun}")
x = np.linspace(max(t), 2025, 2000)
gp = build_gp(soln.x, t)
mu, var = gp.predict(y, x, return_var=True)
plt.plot(t, y, ".k")
plt.fill_between(x, mu + np.sqrt(var), mu - np.sqrt(var), color="C0", alpha=0.5)
plt.plot(x, mu, color="C0", lw=2)
plt.xlim(t.min(), 2025)
plt.xlabel("year")
_ = plt.ylabel("CO$_2$ in ppm")
plt.savefig("gp-mauna-loa-pred.pdf")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connect to server
Step2: <hr> Simple matrix
Step3: <hr> Different shapes
Step4: <hr> Colors
Step5: <hr> Labels
Step6: You can also turn on labeling of cells by their value.
|
<ASSISTANT_TASK:>
Python Code:
from lightning import Lightning
from numpy import random, arange, asarray, corrcoef, argsort, array
import networkx as nx
from sklearn import datasets
lgn = Lightning(ipython=True, host='http://public.lightning-viz.org')
mat = random.randn(10,10)
lgn.matrix(mat)
mat = random.randn(10,20)
lgn.matrix(mat)
mat = random.randn(20,10)
lgn.matrix(mat)
mat = random.rand(10,10)
lgn.matrix(mat, colormap='Reds')
mat = random.rand(10,10)
lgn.matrix(mat, colormap='Spectral')
n, m = (8, 16)
mat = arange(n*m).reshape(n,m)
rows = ['row ' + str(i) for i in range(n)]
columns = ['col ' + str(i) for i in range(m)]
lgn.matrix(mat, row_labels=rows, column_labels=columns)
mat = arange(n*m).reshape(n,m)
lgn.matrix(mat, numbers=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As you can see, we have 60,000 training examples with 784 features. Let's see how long it takes to train and predict on this high-dimensional dataset
Step2: On my modest hardware (2016 macbook pro) it took about 1 minute (54 s) to train and predict with a 4.3% error rate -- pretty good!
Step3: Once we have a covariance matrix, we can get the eigenvalues and the eigenvectors.
Step4: Each eigenvalue represents the variance of that feature. By definition, the total variation is given by the sum of the variances. It turns out that this is also equal to the sum of the eigenvalues of the covariance matrix. Thus, the total variation is
Step5: As we can see from the output above, the total variance of our dataset is
Step6: This means that each feature has a variance that contributes to the overall variance. Is variance equally distributed among features? Are some features more expressive of the dataset while others contribute hardly any information? If so, then we can remove those features from our training set.
Step7: The above figure shows how much variance each feature contributes to the whole. The x axis represents our eigenvalue/eigenvector pairs, which correspond to features in the original dataset. We can see that after about 150 vectors, the subsequent vectors don't contribute much to the whole. We can probably cut out the last 634 vectors without losing much accuracy with our classifier while at the same time decreasing training time!
Step8: Just to quantify the statement above
Step9: At this point, we have the same eigenvalues and eigenvectors that we generated ourselves in the previous steps. However, now we only selected the top 150, and scikit-learn calls them components_.
Step10: As you can see, we now have a 150-feature training set with only useful information. Let's benchmark it against the classification job we did pre-PCA
Step11: Results
|
<ASSISTANT_TASK:>
Python Code:
import loader as support #support library to read mnist files into memory
import gaussian_classifier as gf
import time
%pylab inline
X_train, Y_train = support.loadmnist('data/train-images-idx3-ubyte',
'data/train-labels-idx1-ubyte')
X_test, Y_test = support.loadmnist('data/t10k-images-idx3-ubyte',
'data/t10k-labels-idx1-ubyte')
X_train.shape
clf = gf.GaussianClassifier(c=3400)
start = time.time()
clf.fit(X_train, Y_train)
Y = clf.predict(X_test)
errors = (Y_test != Y).sum();total = X_test.shape[0]
print("Error rate:\t %d/%d = %f" % ((errors,total,(errors/float(total)))))
end = time.time()
duration = end-start
duration
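# Rows of X_train are examples and columns are pixel features, so we transpose
# before computing the 784x784 feature covariance matrix and decomposing it.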
cov = numpy.cov(X_train.T)
evals, evecs = numpy.linalg.eig(cov)
evals = np.float64(evals)
evecs = np.float64(evecs)
total_variation = np.sum(evals)
np.float64(total_variation)
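# Optional sanity check (not in the original notebook): the sum of the
# eigenvalues should match the trace of the covariance matrix, i.e. the sum of
# the per-feature variances, up to numerical error.
print(np.allclose(total_variation, np.trace(cov)))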
explained_variance_ratio = []
explained_variance = []
for i in range(0,X_train.shape[1]):
explained_variance.append(evals[i])
variance = evals[i] / total_variation
explained_variance_ratio.append(variance)
explained_variance_ratio
print(sum(explained_variance_ratio))
cumulative_explained = cumsum(explained_variance_ratio)
plot(cumulative_explained);
grid()
plt.figure(1, figsize=(8, 6))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(explained_variance_ratio, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
grid()
np.sum(explained_variance_ratio[:150])
np.sum(explained_variance_ratio[150:])
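# scikit-learn's PCA performs the equivalent decomposition for us and keeps
# only the requested number of components.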
from sklearn.decomposition import PCA
pca = PCA(n_components=150)
pca.fit(X_train)
pca.components_.shape
X_train_150 = pca.transform(X_train)
X_test_150 = pca.transform(X_test)
clf = gf.GaussianClassifier(c=1)
start = time.time()
clf.fit(X_train_150, Y_train)
Y = clf.predict(X_test_150)
errors = (Y_test != Y).sum(); total = X_test_150.shape[0]
print("Error rate:\t %d/%d = %f" % ((errors, total, (errors/float(total)))))
end = time.time()
duration = end-start
duration
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure(2, figsize=(8, 6))
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This scenario, as well as all other scenarios in Flow, is parametrized by the following arguments
Step2: 2.2 VehicleParams
Step3: Once this object is created, vehicles may be introduced using the add method. This method specifies the types and quantities of vehicles at the start of a simulation rollout. For a description of the various arguments associated with the add method, we refer the reader to the following documentation (VehicleParams.add).
Step4: Another controller we define is for the vehicle's routing behavior. For closed networks, where the route for any vehicle is repeated, the ContinuousRouter controller is used to perpetually reroute all vehicles to the initial set route.
Step5: Finally, we add 22 vehicles of type "human" with the above acceleration and routing behavior to the VehicleParams object.
Step6: 2.3 NetParams
Step7: Importing the ADDITIONAL_NET_PARAMS dict from the ring road scenario, we see that the required parameters are
Step8: 2.4 InitialConfig
Step9: 2.5 TrafficLightParams
Step10: 3. Setting up an Environment
Step11: Although we will not be training any autonomous agents in this exercise, the use of an environment allows us to view the cumulative reward that simulation rollouts receive in the absence of autonomy.
Step12: 3.2 EnvParams
Step13: Importing the ADDITIONAL_ENV_PARAMS variable, we see that it consists of only one entry, "target_velocity", which is used when computing the reward function associated with the environment. We use this default value when generating the EnvParams object.
Step14: 4. Setting up and Running the Experiment
Step15: These objects may be used to simulate rollouts in the absence of reinforcement learning agents, as well as acquire behaviors and rewards that may be used as a baseline with which to compare the performance of the learning agent. In this case, we choose to run our experiment for one rollout consisting of 3000 steps (300 s).
Step16: As we can see from the above simulation, the initial perturbations in the network propagate and intensify into instabilities, eventually leading to the formation of stop-and-go waves after approximately 180 s.
Step17: The .xml file contains various vehicle-specific parameters at every time step. This information is transferred to a .csv file if the convert_to_csv parameter in exp.run() is set to True. This file looks as follows
|
<ASSISTANT_TASK:>
Python Code:
from flow.scenarios.loop import LoopScenario
name = "ring_example"
from flow.core.params import VehicleParams
vehicles = VehicleParams()
from flow.controllers.car_following_models import IDMController
from flow.controllers.routing_controllers import ContinuousRouter
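# 22 human-driven vehicles: IDM governs acceleration, ContinuousRouter keeps
# them circling the ring indefinitely.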
vehicles.add("human",
acceleration_controller=(IDMController, {}),
routing_controller=(ContinuousRouter, {}),
num_vehicles=22)
from flow.scenarios.loop import ADDITIONAL_NET_PARAMS
print(ADDITIONAL_NET_PARAMS)
from flow.core.params import NetParams
net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS)
from flow.core.params import InitialConfig
initial_config = InitialConfig(spacing="uniform", perturbation=1)
from flow.core.params import TrafficLightParams
traffic_lights = TrafficLightParams()
from flow.envs.loop.loop_accel import AccelEnv
from flow.core.params import SumoParams
sumo_params = SumoParams(sim_step=0.1, render=True, emission_path='data')
from flow.envs.loop.loop_accel import ADDITIONAL_ENV_PARAMS
print(ADDITIONAL_ENV_PARAMS)
from flow.core.params import EnvParams
env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)
from flow.core.experiment import Experiment
# create the scenario object
scenario = LoopScenario(name="ring_example",
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config,
traffic_lights=traffic_lights)
# create the environment object
env = AccelEnv(env_params, sumo_params, scenario)
# create the experiment object
exp = Experiment(env)
# run the experiment for a set number of rollouts / time steps
_ = exp.run(1, 3000, convert_to_csv=True)
import os
emission_location = os.path.join(exp.env.sim_params.emission_path, exp.env.scenario.name)
print(emission_location + '-emission.xml')
import pandas as pd
pd.read_csv(emission_location + '-emission.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A note about scopes and namespaces
Step2: List-comprehensions (and all other comprehensions) have their own scope
Step3: Note
Step5: The attribute pi is present in both namespaces, so that there are no conflicts between variables or functions with the same name.
Step6: MyClass.i and MyClass.f are valid attribute references, returning an integer and a function object. Class attributes can also be assigned to, so you can change the value of MyClass.i by assignment. __doc__ is also a valid attribute, returning the docstring belonging to the class.
Step7: New class instances can be created with specific initial variables, either with default values or user-defined ones. The __init__ method is used for this task, usually as the first method in the class definition. If __init__ has any positional arguments, an instance cannot be created without providing them.
Step8: What about the self variable?
Step9: On top of the attributes (variables) and methods (functions) created when a class instance is initiated, we can attach attributes to an already existing class instance
Step10: Class and instance variables
Step11: Warning
Step12: Inheritance
Step13: An underappreciated advantage of inheritance is that it allows you to extend classes that belong to different namespaces. This means that even classes belonging to different modules (or even the built-in namespace) can be extended.
Step14: Public and private attributes/methods
Step15: In the above example, the _private attribute is not meant to be called by the class user, but it can still be easily accessed. In languages like C++ accessing or changing the value of a private attribute would trigger an error. In python it is possible but might interfere with the intended purpose of that attribute/method.
Step16: We have created a new attribute called __private, but the original class attribute has not been changed. That is because name mangling has transformed the __private attribute to _Reverser__private internally.
Step17: Operators
Step18: The sum operator is in fact a method of the int class. The following expression is exactly equivalent to calling x + y.
Step19: A comprehensive list of operators that can be implemented for any given class can be found here. It's worth noting that many of those operators are already implemented for any class. Re-implementing an existing operator for your own class is termed operator overloading (re-implementing an inherited method more generally is called overriding).
Step20: For instance, the __eq__ method implements the == boolean operation. The basic implementation checks whether two instances are exactly the same, a behaviour that is not always intuitive.
Step21: Other interesting operators
|
<ASSISTANT_TASK:>
Python Code:
print(dir(bool))
def f():
x = 1
print(x)
x = 2
f()
print(x)
x = 2
a = [x**2 for x in range(10)]
print(a)
print(x)
import math
import numpy
print(math.pi, numpy.pi)
# don't do this at home
math.pi = 2
print(math.pi, numpy.pi)
class MyClass:
    """A simple example class"""
i = 12345
def __init__(self):
self.data = []
def f(self):
return 'hello world'
x = MyClass()
print(x.i)
print(x.f())
print(x.__doc__)
print(x.i)
x.i = 2
print(x.i)
class Complex:
def __init__(self, realpart, imagpart):
self.r = realpart
self.i = imagpart
def generic_method(self, value):
print(value)
x = Complex()
x = Complex(1.1, -2.3)
x.r, x.i
x.generic_method(100)
Complex.generic_method(x, 100)
x.counter = 1
while x.counter < 10:
x.counter = x.counter * 2
print(x.counter)
del x.counter
x.counter
class Dog:
# class variable shared by all instances
kind = 'canine'
def __init__(self, name):
# instance variable unique to each instance
self.name = name
d = Dog('Fido')
e = Dog('Buddy')
# shared by all dogs
print(d.kind)
print(e.kind)
# unique to each instance
print(d.name)
print(e.name)
class Dog:
# this is ok
kind = 'canine'
# mutable class variable
tricks = []
def __init__(self, name):
self.name = name
def add_trick(self, trick):
self.tricks.append(trick)
d = Dog('Fido')
e = Dog('Buddy')
# operating on the `tricks` class variable in two separate instances
d.add_trick('roll over')
e.add_trick('play dead')
# changing the `kind` class variable
e.kind = 'super-dog'
print(d.kind)
print(d.tricks)
# base class
class Sequence:
def __init__(self, name, sequence):
self.name = name
self.sequence = sequence
# inherits Sequence,
# has specific attributes and methods
class Dna(Sequence):
def reverse_complement(self):
translation_table = str.maketrans('ACGTacgt', 'TGCAtgca')
revcomp_sequence = self.sequence.translate(translation_table)[::-1]
return revcomp_sequence
# inherits Sequence,
# has specific attributes and methods
class Protein(Sequence):
def get_exon_length(self):
return len(self.sequence) * 3
dna = Dna('gene1', 'ACTGCGACCAAGACATAG')
dna.reverse_complement()
prot = Protein('protein1', 'MPNFFIDRPIFAWVIAIIIMLAGGLAILKLPVAQYPTIAP')
prot.reverse_complement()
prot = Protein('protein1', 'MPNFFIDRPIFAWVIAIIIMLAGGLAILKLPVAQYPTIAP')
prot.get_exon_length()
class BetterInt(int):
def is_odd(self):
return bool(self % 2)
x = BetterInt(2)
x.is_odd()
class Reverser():
def __init__(self, name):
self.public = name
self._private = name[::-1]
def get_reverse(self):
return self._private
x = Reverser('hello world')
print(x.public)
print(x.get_reverse())
x._private = 'luddism'
print(x.get_reverse())
class Reverser():
def __init__(self, name):
self.public = name
self.__private = name[::-1]
def get_reverse(self):
return self.__private
x = Reverser('hello world')
print(x.public)
print(x.get_reverse())
x.__private = 'luddism'
print(x.get_reverse())
print(x.__private)
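# Name mangling: inside the class body, __private was rewritten to
# _Reverser__private, so the original attribute is still reachable under its
# mangled name.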
print(x._Reverser__private)
x = 1
y = 2
x + 2
x = 1
y = 2
x.__add__(y)
x = Protein('prot1', 'MPNFFIDRPIFAWVIAIIIMLAGGLAILKLPVAQYPTIAP')
dir(x)
p1 = Protein('prot1', 'MPNFFIDRPIFAWVIAIIIMLAGGLAILKLPVAQYPTIAP')
p2 = Protein('prot1', 'MPNFFIDRPIFAWVIAIIIMLAGGLAILKLPVAQYPTIAP')
p1 == p2
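# Without a custom __eq__, == falls back to identity comparison, so two
# instances with identical data still compare unequal.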
# let's fix it
class Protein(Sequence):
def get_exon_length(self):
return len(self.sequence) * 3
def __eq__(self, other_instance):
return self.sequence == other_instance.sequence
p1 = Protein('prot1', 'MPNFFIDRPIFAWVIAIIIMLAGGLAILKLPVAQYPTIAP')
p2 = Protein('prot1', 'MPNFFIDRPIFAWVIAIIIMLAGGLAILKLPVAQYPTIAP')
p1 == p2
def sum_two_things(a, b):
return a + b
sum_two_things(1, 2)
sum_two_things('a', 'b')
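# A small additional sketch (not from the original notebook) of two more
# special methods: __len__ backs the built-in len(), and __str__ controls how
# print() renders an instance.
class PrintableProtein(Protein):
    def __len__(self):
        return len(self.sequence)
    def __str__(self):
        return self.name + ': ' + self.sequence[:10] + '...'
p3 = PrintableProtein('prot3', 'MPNFFIDRPIFAWVIAIIIMLAGGLAILKLPVAQYPTIAP')
print(len(p3))
print(p3)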
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: tf.distribute.Strategy with Training Loops
Step2: Download the fashion mnist dataset
Step3: Create a strategy to distribute the variables and the graph
Step4: Setup input pipeline
Step5: tf.distribute.Strategy.experimental_distribute_dataset evenly distributes the dataset across all the replicas.
Step6: Model Creation
Step7: Define the loss function
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import TensorFlow
import tensorflow.compat.v1 as tf
# Helper libraries
import numpy as np
import os
print(tf.__version__)
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Adding a dimension to the array -> new shape == (28, 28, 1)
# We are doing this because the first layer in our model is a convolutional
# layer and it requires a 4D input (batch_size, height, width, channels).
# batch_size dimension will be added later on.
train_images = train_images[..., None]
test_images = test_images[..., None]
# Getting the images in [0, 1] range.
train_images = train_images / np.float32(255)
test_images = test_images / np.float32(255)
train_labels = train_labels.astype('int64')
test_labels = test_labels.astype('int64')
# If the list of devices is not specified in the
# `tf.distribute.MirroredStrategy` constructor, it will be auto-detected.
strategy = tf.distribute.MirroredStrategy()
print ('Number of devices: {}'.format(strategy.num_replicas_in_sync))
BUFFER_SIZE = len(train_images)
BATCH_SIZE_PER_REPLICA = 64
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 10
train_dataset = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_ds = strategy.experimental_distribute_dataset(train_dataset)
test_dataset = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels)).batch(BATCH_SIZE)
test_ds = strategy.experimental_distribute_dataset(test_dataset)
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(64, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
optimizer = tf.train.GradientDescentOptimizer(0.001)
with strategy.scope():
def train_step(dist_inputs):
def step_fn(inputs):
images, labels = inputs
logits = model(images)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
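      # Scale per-example losses by the GLOBAL batch size so that summing the
      # per-replica losses below yields the true mean loss across all replicas.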
loss = tf.nn.compute_average_loss(cross_entropy, global_batch_size=BATCH_SIZE)
train_op = optimizer.minimize(loss)
with tf.control_dependencies([train_op]):
return tf.identity(loss)
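    # Run step_fn on every replica and sum the (already scaled) per-replica losses.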
per_replica_losses = strategy.run(
step_fn, args=(dist_inputs,))
mean_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
with strategy.scope():
train_iterator = train_ds.make_initializable_iterator()
iterator_init = train_iterator.initializer
var_init = tf.global_variables_initializer()
loss = train_step(next(train_iterator))
with tf.Session() as sess:
sess.run([var_init])
for epoch in range(EPOCHS):
sess.run([iterator_init])
for step in range(10000):
if step % 1000 == 0:
print('Epoch {} Step {} Loss {:.4f}'.format(epoch+1,
step,
sess.run(loss)))
<END_TASK>
|