Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
7,800
|
<ASSISTANT_TASK:>
Python Code:
from conf import LisaLogging
LisaLogging.setup()
%pylab inline
import json
import os
# Support to access the remote target
import devlib
from env import TestEnv
# Import support for Android devices
from android import Screen, Workload, System, ViewerWorkload
from target_script import TargetScript
# Support for trace events analysis
from trace import Trace
# Support for FTrace events parsing and visualization
import trappy
import pandas as pd
import sqlite3
from IPython.display import display
# Setup target configuration
my_conf = {
# Target platform and board
"platform" : 'android',
"board" : 'hikey960',
# Device serial ID
# Not required if there is only one device connected to your computer
"device" : "0123456789ABCDEF",
# Android home
# Not required if already exported in your .bashrc
#"ANDROID_HOME" : "/home/vagrant/lisa/tools/",
# Folder where all the results will be collected
"results_dir" : "Viewer_example",
# Define devlib modules to load
"modules" : [
'cpufreq' # enable CPUFreq support
],
# FTrace events to collect for all the tests configuration which have
# the "ftrace" flag enabled
"ftrace" : {
"events" : [
"sched_switch",
"sched_wakeup",
"sched_wakeup_new",
"sched_overutilized",
"sched_load_avg_cpu",
"sched_load_avg_task",
"sched_load_waking_task",
"cpu_capacity",
"cpu_frequency",
"cpu_idle",
"sched_tune_config",
"sched_tune_tasks_update",
"sched_tune_boostgroup_update",
"sched_tune_filter",
"sched_boost_cpu",
"sched_boost_task",
"sched_energy_diff"
],
"buffsize" : 100 * 1024,
},
# Tools required by the experiments
"tools" : [ 'trace-cmd', 'taskset'],
}
# Initialize a test environment using:
te = TestEnv(my_conf, wipe=False)
target = te.target
class GmapsViewer(ViewerWorkload):
    """Google Maps viewer workload driving scripted swipe gestures on the target.

    pre_interact() builds and pushes a shell script of input events;
    interact() executes it during the measured phase.
    """

    def pre_interact(self):
        # Build a target-side shell script named "gmaps_swiper.sh" using the
        # module-level test environment `te`.
        self.script = TargetScript(te, "gmaps_swiper.sh")
        # Define commands to execute during experiment
        # NOTE(review): argument meanings for hswipe/vswipe inferred from the
        # call shape only (script, start%, end%, duration, direction flag) —
        # confirm against the lisa System API.
        for i in range(2):
            System.hswipe(self.script, 40, 60, 100, False)
            self.script.append('sleep 1')
            System.vswipe(self.script, 40, 60, 100, True)
            self.script.append('sleep 1')
            System.hswipe(self.script, 40, 60, 100, True)
            self.script.append('sleep 1')
            System.vswipe(self.script, 40, 60, 100, False)
            self.script.append('sleep 1')
        # Push script to the target
        self.script.push()

    def interact(self):
        # Run the previously pushed gesture script on the target.
        self.script.run()
def experiment():
    """Run the 'gmapsviewer' workload with ftrace collection enabled.

    Sets the 'sched' cpufreq governor on all CPUs, runs the workload against
    a fixed Google Maps URI, and dumps the platform descriptor into the
    results directory.

    Returns None — the `results = experiment()` call site below keeps None.
    """
    # Configure governor
    target.cpufreq.set_all_governors('sched')
    # Get workload
    wload = Workload.getInstance(te, 'gmapsviewer')
    # Run workload
    wload.run(out_dir=te.res_dir,
              collect="ftrace",
              uri="https://goo.gl/maps/D8Sn3hxsHw62")
    # Dump platform descriptor
    te.platform_dump(te.res_dir)
results = experiment()
# Load traces in memory (can take several minutes)
platform_file = os.path.join(te.res_dir, 'platform.json')
with open(platform_file, 'r') as fh:
platform = json.load(fh)
trace_file = os.path.join(te.res_dir, 'trace.dat')
trace = Trace(platform, trace_file, events=my_conf['ftrace']['events'], normalize_time=False)
!kernelshark {trace_file} 2>/dev/null
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test environment setup
Step2: Workload definition
Step3: Workload execution
Step4: Traces visualisation
|
7,801
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.svm import SVR
from sklearn import cross_validation
np.random.seed(0)
n_samples = 200
kernels = ['linear', 'poly', 'rbf']
true_fun = lambda X: X ** 3
X = np.sort(5 * (np.random.rand(n_samples) - .5))
y = true_fun(X) + .01 * np.random.randn(n_samples)
plt.figure(figsize=(14, 5))
for i in range(len(kernels)):
ax = plt.subplot(1, len(kernels), i + 1)
plt.setp(ax, xticks=(), yticks=())
model = SVR(kernel=kernels[i], C=5)
model.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(model,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(3 * -.5, 3 * .5, 100)
plt.plot(X_test, model.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((-3 * .5, 3 * .5))
plt.ylim((-1, 1))
plt.legend(loc="best")
plt.title("Kernel {}\nMSE = {:.2e}(+/- {:.2e})".format(
kernels[i], -scores.mean(), scores.std()))
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
np.random.seed(0)
n_samples = 200
true_fun = lambda X: X ** 3
X = np.sort(5 * (np.random.rand(n_samples) - .5))
y = true_fun(X) + .02 * np.random.randn(n_samples)
X = X[:, None]
y = y
f, axarr = plt.subplots(1, 3)
axarr[0].scatter(X[::20], y[::20])
axarr[0].set_xlim((-3 * .5, 3 * .5))
axarr[0].set_ylim((-1, 1))
axarr[1].scatter(X[::10], y[::10])
axarr[1].set_xlim((-3 * .5, 3 * .5))
axarr[1].set_ylim((-1, 1))
axarr[2].scatter(X, y)
axarr[2].set_xlim((-3 * .5, 3 * .5))
axarr[2].set_ylim((-1, 1))
plt.show()
from sklearn.learning_curve import learning_curve
from sklearn.svm import SVR
training_sizes, train_scores, test_scores = learning_curve(SVR(kernel='linear'), X, y, cv=10, scoring="mean_squared_error",
train_sizes=[.6, .7, .8, .9, 1.])
# Use the negative because we want to minimize squared error
plt.plot(training_sizes, -train_scores.mean(axis=1), label="training scores")
plt.plot(training_sizes, -test_scores.mean(axis=1), label="test scores")
plt.ylim((0, 50))
plt.legend(loc='best')
from sklearn.learning_curve import learning_curve
from sklearn.svm import SVR
training_sizes, train_scores, test_scores = learning_curve(SVR(kernel='rbf'), X, y, cv=10, scoring="mean_squared_error",
train_sizes=[.6, .7, .8, .9, 1.])
# Use the negative because we want to minimize squared error
plt.plot(training_sizes, -train_scores.mean(axis=1), label="training scores")
plt.plot(training_sizes, -test_scores.mean(axis=1), label="test scores")
plt.ylim((0, 50))
plt.legend(loc='best')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Learning Curves
Step2: They all come from the same underlying process. But if you were asked to make a prediction, you would be more likely to draw a straight line for the left-most one, as there are only very few datapoints, and no real rule is apparent. For the dataset in the middle, some structure is recognizable, though the exact shape of the true function is maybe not obvious. With even more data on the right hand side, you would probably be very comfortable with drawing a curved line with a lot of certainty.
Step3: You can see that for the model with kernel = linear, the validation score doesn't really decrease as more data is given.
|
7,802
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from fastai.conv_learner import *
PATH = "data/cifar10/"
os.makedirs(PATH, exist_ok=True)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))
def get_data(sz, bs):
    """Return CIFAR-10 ImageClassifierData at image size `sz`, batch size `bs`.

    Images are normalized with the module-level `stats` and augmented with a
    random flip plus reflection padding of sz//8; the validation set is the
    'test' folder under PATH.
    """
    tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomFlip()], pad = sz//8)
    return ImageClassifierData.from_paths(PATH, val_name='test', tfms=tfms, bs=bs)
bs = 64
data = get_data(32, 4)
x, y = next(iter(data.trn_dl))
plt.imshow(data.trn_ds.denorm(x)[0]);
plt.imshow(data.trn_ds.denorm(x)[1]);
sz = 64
from fastai.models import darknet
darknet53 = darknet.darknet_53()
tfms = tfms_from_stats(imagenet_stats, sz, aug_tfms=transforms_side_on, pad=sz//8)
model_data = ImageClassifierData.from_paths(PATH, bs=bs, tfms=tfms, val_name='test', num_workers=2)
learner = ConvLearner.from_model_data(darknet53, model_data)
learner.crit
darknet.Darknet()
f_model = resnet18
sz = 32
tfms = tfms_from_model(f_model, sz, aug_tfms=transforms_side_on, pad=sz//8)
model_data = ImageClassifierData.from_paths(PATH, bs=bs, tfms=tfms,
val_name='test', num_workers=2)
# learner = ConvLearner.from_model_data(f_model, model_data) # throws AttributeError: 'function' object has no attribute 'children'
learner = ConvLearner.pretrained(f_model, model_data)
learner.crit
# learner.summary()
# learner.model.children
model_data.c
from fastai.models import darknet
sz = 64
darknet53 = darknet.darknet_53()
tfms = tfms_from_stats(imagenet_stats, sz, aug_tfms=transforms_side_on, pad=sz//8)
model_data = ImageClassifierData.from_paths(PATH, bs=bs, tfms=tfms, val_name='test', num_workers=2)
learner = ConvLearner.from_model_data(darknet53, model_data)
learner.crit
# learner.summary()
class alt_ConvBN(nn.Module):
    """2D convolution followed by BatchNorm and a leaky-ReLU activation."""

    def __init__(self, ch_in, ch_out, kernel_size=3, stride=1, padding=0):
        super().__init__()
        # The conv carries no bias: BatchNorm's learned shift makes it redundant.
        self.conv = nn.Conv2d(ch_in, ch_out, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(ch_out, momentum=0.01)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return F.leaky_relu(out, negative_slope=0.1)
class alt_DarknetBlock(nn.Module):
    """Darknet residual block: 1x1 channel-halving conv, 3x3 conv back up,
    plus an identity skip connection."""

    def __init__(self, ch_in):
        super().__init__()
        ch_hid = ch_in // 2
        self.conv1 = alt_ConvBN(ch_in, ch_hid, kernel_size=1, stride=1, padding=0)
        self.conv2 = alt_ConvBN(ch_hid, ch_in, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        # Residual connection: input added back after the two convolutions.
        return x + self.conv2(self.conv1(x))
class alt_Darknet(nn.Module):
    """Darknet-53-style backbone; replicates table 1 from the YOLOv3 paper.

    `num_blocks` lists the number of residual blocks in each of the five
    stages, e.g. [1, 2, 8, 8, 4] for Darknet-53. Returns per-class
    log-probabilities (log-softmax over `num_classes`).
    """

    def __init__(self, num_blocks, num_classes=1000):
        super().__init__()
        self.conv = alt_ConvBN(3, 32, kernel_size=3, stride=1, padding=1)
        self.layer1 = self.make_group_layer(32, num_blocks[0])
        self.layer2 = self.make_group_layer(64, num_blocks[1], stride=2)
        self.layer3 = self.make_group_layer(128, num_blocks[2], stride=2)
        self.layer4 = self.make_group_layer(256, num_blocks[3], stride=2)
        self.layer5 = self.make_group_layer(512, num_blocks[4], stride=2)
        self.linear = nn.Linear(1024, num_classes)

    def make_group_layer(self, ch_in, num_blocks, stride=1):
        """One (optionally strided) conv doubling the channels, followed by
        `num_blocks` residual blocks at the doubled channel count."""
        # NOTE(review): this conv uses alt_ConvBN's default padding=0, so the
        # spatial size shrinks by more than the stride alone; YOLOv3's 3x3
        # convs use padding=1 — confirm whether this is intended.
        layers = [alt_ConvBN(ch_in, ch_in*2, stride=stride)]
        for i in range(num_blocks):
            layers.append(alt_DarknetBlock(ch_in*2))
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        # Global average pool to 1x1, flatten, then classify.
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        # dim=-1 made explicit: the implicit-dim call is deprecated and, for
        # this 2D input, resolved to dim=1 anyway — same result, no warning.
        return F.log_softmax(self.linear(out), dim=-1)
sz = 64
alt_darknet53 = alt_Darknet([1,2,8,8,4], num_classes=10)
learner = ConvLearner.from_model_data(alt_darknet53, model_data)
learner.crit
# learner.summary()
# learner.model.parameters
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Viewing Data
Step2: Learner Initialization Tests
Step3:
Step4: Could num_classes be used for loss-choice logic?
Step5: Darknet53 Tests
Step6: earlier vsn Darknet
|
7,803
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from numpy.polynomial import Chebyshev as T
from numpy.polynomial.hermite import hermval
%matplotlib inline
def p_cheb(x, n):
    r"""RETURNS T_n(x)

    Value of the (not normalized) Chebyshev polynomial of the first kind:
    $\int \frac1{\sqrt{1-x^2}}T_m(x)T_n(x) dx = \frac\pi2\delta_{nm}$

    Note: restored as a proper (raw) docstring — the quotes were lost,
    leaving bare prose lines that were a syntax error.
    """
    return T.basis(n)(x)
def p_herm(x, n):
    r"""RETURNS He_n(x)

    Value of the non-normalized probabilists' Hermite polynomial, obtained
    from the physicists' polynomial H_n via He_n(x) = 2^{-n/2} H_n(x/\sqrt2).

    Note: restored as a proper docstring — the quotes were lost, leaving
    bare prose lines that were a syntax error.
    """
    cf = np.zeros(n+1)
    cf[n] = 1  # select the degree-n basis polynomial for hermval
    return (2**(-float(n)*0.5))*hermval(x/np.sqrt(2.0), cf)
def system_mat(pnts, maxn, poly):
    """RETURNS the system (Vandermonde-like) matrix.

    A[i, j] = poly(pnts[i], j) for j = 0 .. maxn-1, where `poly(x, n)`
    evaluates the degree-n basis function at points x.

    Note: restored as a proper docstring — the quotes were lost.
    """
    A = np.empty((pnts.size, maxn), dtype=float)
    for i in range(maxn):
        A[:, i] = poly(pnts, i)
    return A
x = np.linspace(-1, 1, 1000)
data = []
for i in range(5):
data.append(x)
data.append(p_cheb(x, i))
plt.plot(*data)
plt.legend(["power = {}".format(i) for i in range(len(data))]);
def complex_func(x):
    """Oscillatory test function: sin(2*pi*x) * cos(0.75*pi*(x + 0.3))."""
    fast_wave = np.sin(2.0 * np.pi * x)
    slow_wave = np.cos(0.75 * np.pi * (x + 0.3))
    return fast_wave * slow_wave
plt.plot(x, complex_func(x));
n = 6
M = n
nodes = np.linspace(-1, 1, M)
RH = complex_func(nodes)
A = system_mat(nodes, n, p_cheb)
if n == M:
alpha = np.linalg.solve(A, RH)
else:
alpha = np.linalg.lstsq(A, RH)[0]
print("α = {}".format(alpha))
def calc_apprximant(poly, alpha, x):
    """RETURNS values of the approximant at points x.

    Evaluates sum_i alpha[i] * poly(x, i), i.e. the truncated expansion in
    the basis provided by `poly(x, n)`.

    Note: restored as a proper docstring — the quotes were lost. (Function
    name keeps its original spelling for call-site compatibility.)
    """
    n = len(alpha)
    # Force a float accumulator: np.zeros_like(x) would inherit an integer
    # dtype from integer inputs and silently truncate the in-place sums.
    y = np.zeros_like(x, dtype=float)
    for i in range(n):
        y[...] += poly(x, i)*alpha[i]
    return y
y = complex_func(x)
approx_y = calc_apprximant(p_cheb, alpha, x)
plt.plot(x, y, x, approx_y, nodes, RH, 'ro');
epsilon = np.linalg.norm(y - approx_y, np.inf)
print("ε = {}".format(epsilon))
A = system_mat(nodes, n, p_herm)
if n == M:
alpha = np.linalg.solve(A, RH)
else:
alpha = np.linalg.lstsq(A, RH)[0]
print("α = {}".format(alpha))
approx_y = calc_apprximant(p_herm, alpha, x)
plt.plot(x, y, x, approx_y, nodes, RH, 'ro')
epsilon = np.linalg.norm(y - approx_y, np.inf)
print("ε = {}".format(epsilon))
nodes = np.cos((2.0*np.arange(M) + 1)/M*0.5*np.pi)
RH = complex_func(nodes)
A = system_mat(nodes, n, p_herm)
alpha = np.linalg.solve(A, RH)
print("α = {}".format(alpha))
approx_y = calc_apprximant(p_herm, alpha, x)
plt.plot(x, y, x, approx_y, nodes, RH, 'ro')
epsilon_cheb = np.linalg.norm(y - approx_y, np.inf)
print("ε_cheb = {}".format(epsilon_cheb))
# All in one. We can play with maximum polynomial power
def plot_approx(f, n, distrib='unif', poly='cheb'):
    """Interpolate `f` with an n-term polynomial expansion and plot the result.

    distrib selects the node distribution ('unif...' or 'cheb...'), poly
    selects the basis ('cheb...' or 'herm...'); only the first four
    characters of either string matter. Prints the inf-norm error.
    """
    def make_nodes(n, distrib='unif'):
        # Uniform nodes vs roots of the degree-n Chebyshev polynomial.
        return {'unif' : lambda : np.linspace(-1, 1, n),
                'cheb' : lambda : np.cos((2.0*np.arange(n) + 1.0)/n*0.5*np.pi)}[distrib[:4].lower()]
    poly_f = {'cheb' : p_cheb, 'herm' : p_herm}[poly[:4].lower()]
    # solve
    nodes = make_nodes(n, distrib)()
    RH = f(nodes)
    # BUG FIX: p_herm was hard-coded here and below, so the `poly` argument
    # (and the notebook's polynomial-type widget) had no effect. Use the
    # selected basis `poly_f` consistently.
    A = system_mat(nodes, n, poly_f)
    alpha = np.linalg.solve(A, RH)
    # calc values
    x = np.linspace(-1, 1, 2**10)
    y = f(x)
    approx_y = calc_apprximant(poly_f, alpha, x)
    # plot
    plt.figure(figsize=(14, 6.5))
    plt.plot(x, y, x, approx_y, nodes, RH, 'ro')
    plt.show()
    # calc error
    epsilon_cheb = np.linalg.norm(y - approx_y, np.inf)
    print("ε = {}".format(epsilon_cheb))
from ipywidgets import interact, fixed, widgets
interact(plot_approx,
f=fixed(complex_func),
n=widgets.IntSlider(min=1,max=15,step=1,value=4,continuous_update=True,description='# of terms (n)'),
distrib=widgets.ToggleButtons(options=['Uniform', 'Chebyshev roots'],description='Points distr.'),
poly=widgets.ToggleButtons(options=['Chebyshev polynomials', 'Hermite polynomials'],description='Poly. type')
);
# Scale the function a little
scale = 5.0
big_x = np.random.randn(int(1e6))
big_y = complex_func(big_x/scale)
mean = np.mean(big_y)
var = np.std(big_y)**2
print ("mean = {}, variance = {}".format(mean, var))
def p_herm_snorm(n):
    """Squared norm of the "math" (probabilists') Hermite polynomial with
    weight w = exp(-x^2/2)/sqrt(2*pi): ||He_n||^2 = n!.

    Note: restored as a proper docstring — the quotes were lost.
    """
    # np.math was deprecated and removed in NumPy 2.0; use the stdlib directly.
    import math
    return math.factorial(n)
n = 15
M = n
nodes = np.linspace(-scale, scale, M)
RH = complex_func(nodes/scale)
A = system_mat(nodes, n, p_herm)
if n == M:
alpha = np.linalg.solve(A, RH)
else:
W = np.diag(np.exp( -nodes**2*0.5))
alpha = np.linalg.lstsq(W.dot(A), W.dot(RH))[0]
h = np.array([p_herm_snorm(i) for i in range(len(alpha))])
var = np.sum(alpha[1:]**2*h[1:])
print ("mean = {}, variance = {}".format(alpha[0]*h[0], var))
ex = 2
x = np.linspace(-scale - ex, scale + ex, 10000)
y = complex_func(x/scale)
approx_y = calc_apprximant(p_herm, alpha, x)
plt.plot(x, y, x, approx_y, nodes, RH, 'ro');
# 1-D example
from ipywidgets import interactive, interact, widgets
import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial as SP
# defining Squared Exponential Kernel and plot it
def k(length_scale):
    """Plot one slice of the squared-exponential (RBF) kernel,
    k(x, 0) = exp(-x^2 / (2 * length_scale^2)), for x in [0, 5)."""
    x = np.arange(0., 5., 0.1)
    plt.figure(figsize=(10, 7))
    plt.ylim([0, 1.05])
    plt.xlabel('$x$', fontsize=16)
    plt.ylabel('$k(x,0)$', fontsize=16)
    plt.plot(x, np.exp(-.5 * x**2/length_scale**2), 'b-')
    plt.show()
controls = {r'length_scale': widgets.FloatSlider(
min=0.01, max=5.0, step=0.1, value=1., continuous_update=False, description=r'$\ell$')}
from ipywidgets import interactive
import matplotlib.pyplot as plt
import numpy as np
def GP(length_scale, Test, Training, sigma):
    """This is code for simple GP regression. It assumes a zero mean GP Prior.

    Fits sin(0.9 x) from `Training` noisy samples, predicts at `Test` evenly
    spaced points, plots mean +/- one s.d., and prints the relative inf-norm
    error. (Docstring restored — its quotes were lost in extraction.)
    """
    np.random.seed(100)
    # This is the true unknown function we are trying to approximate
    def f(x): return np.sin(0.9*x.flatten())
    # Define the kernel
    def kernel(a, b):
        sqdist = SP.distance.cdist(a, b, 'sqeuclidean')
        return np.exp(-.5 * sqdist/(length_scale**2))
    N = Training    # number of training points.
    n = Test        # number of test points.
    s = sigma       # noise variance.
    # Sample some input points and noisy versions of the function evaluated at
    # these points.
    X = np.random.uniform(-5, 5, size=(N, 1))
    y = f(X) + s*np.random.randn(N)
    K = kernel(X, X)
    # NOTE(review): `s` is labelled a variance but is added unsquared both to
    # the samples and to K's diagonal — confirm whether it is meant as a
    # variance or a standard deviation.
    L = np.linalg.cholesky(K + s*np.eye(N))
    # points we're going to make predictions at.
    Xtest = np.linspace(-5, 5, n)[:, None]
    # compute the mean at our test points.
    Lk = np.linalg.solve(L, kernel(X, Xtest))
    mu = np.dot(Lk.T, np.linalg.solve(L, y))
    # compute the variance at our test points.
    K_ = kernel(Xtest, Xtest)
    s2 = np.diag(K_) - np.sum(Lk**2, axis=0)
    s = np.sqrt(s2)  # `s` is reused: from here on it is the predictive s.d.
    # PLOTS:
    plt.figure(figsize=(9, 7))
    plt.clf()
    plt.plot(X, y, 'r+', ms=18, label="Training points")
    plt.plot(Xtest, f(Xtest), 'b-', label="Function")
    plt.gca().fill_between(Xtest.flat, mu-s, mu+s,
                           color="#dddddd", label="Confidence interval")
    plt.plot(Xtest, mu, 'r--', lw=2, label="Approximation")
    plt.title(r'Mean prediction plus-minus one s.d.')
    plt.xlabel('$x$', fontsize=16)
    plt.ylabel('$f(x)$', fontsize=16)
    plt.axis([-5, 5, -3, 3])
    plt.legend()
    print("Error (inf. norm) = ", np.linalg.norm(f(Xtest)-mu, ord=np.inf)/np.linalg.norm(f(Xtest), ord=np.inf))
    plt.show()
controls = {r'sigma': widgets.FloatSlider(min=5e-4, max=5e-1, step=1e-3, value=1e-3, continuous_update=True, description=r'$\sigma$'),
r'length_scale': widgets.FloatSlider(min=0.1, max=2.0, step=0.05, value=0.7, continuous_update=True, description=r'$\ell$'),
r'Training': widgets.IntSlider(min=1, max=50, step=1, value=10, continuous_update=True, description=r'$N$ of $f$ evals'),
r'Test': widgets.IntSlider(min=1, max=100, step=1, value=50, continuous_update=True, description=r'$N$ of GP samples')}
interact(GP, **controls);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Multivariate function approximation
Step5: Now, let's approximate the function with polynomials taking different maximal power $n$ and the corresponding number of node points
Step6: Approximate value of the error
Step7: If we take another set of polynomials, the result of the approximation will be the same (coefficients $\alpha$ will be different of course).
Step8: Now, what will change if we take another set of node points?
Step10: Random input
Step11: Note, that the precise values are
Step13: Linear model
|
7,804
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time, os, json
import numpy as np
import matplotlib.pyplot as plt
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.rnn_layers import *
from cs231n.captioning_solver import CaptioningSolver
from cs231n.classifiers.rnn import CaptioningRNN
from cs231n.coco_utils import load_coco_data, sample_coco_minibatch, decode_captions
from cs231n.image_utils import image_from_url
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """returns relative error

    Maximum elementwise |x - y| / max(1e-8, |x| + |y|); the 1e-8 floor
    guards against division by zero. (Docstring restored — its quotes were
    lost, leaving a bare prose line that was a syntax error.)
    """
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load COCO data from disk; this returns a dictionary
# We'll work with dimensionality-reduced features for this notebook, but feel
# free to experiment with the original features by changing the flag below.
data = load_coco_data(pca_features=True)
# Print out all the keys and values from the data dictionary
for k, v in data.iteritems():
if type(v) == np.ndarray:
print k, type(v), v.shape, v.dtype
else:
print k, type(v), len(v)
N, D, H = 3, 4, 5
x = np.linspace(-0.4, 1.2, num=N*D).reshape(N, D)
prev_h = np.linspace(-0.3, 0.7, num=N*H).reshape(N, H)
prev_c = np.linspace(-0.4, 0.9, num=N*H).reshape(N, H)
Wx = np.linspace(-2.1, 1.3, num=4*D*H).reshape(D, 4 * H)
Wh = np.linspace(-0.7, 2.2, num=4*H*H).reshape(H, 4 * H)
b = np.linspace(0.3, 0.7, num=4*H)
next_h, next_c, cache = lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)
expected_next_h = np.asarray([
[ 0.24635157, 0.28610883, 0.32240467, 0.35525807, 0.38474904],
[ 0.49223563, 0.55611431, 0.61507696, 0.66844003, 0.7159181 ],
[ 0.56735664, 0.66310127, 0.74419266, 0.80889665, 0.858299 ]])
expected_next_c = np.asarray([
[ 0.32986176, 0.39145139, 0.451556, 0.51014116, 0.56717407],
[ 0.66382255, 0.76674007, 0.87195994, 0.97902709, 1.08751345],
[ 0.74192008, 0.90592151, 1.07717006, 1.25120233, 1.42395676]])
print 'next_h error: ', rel_error(expected_next_h, next_h)
print 'next_c error: ', rel_error(expected_next_c, next_c)
N, D, H = 4, 5, 6
x = np.random.randn(N, D)
prev_h = np.random.randn(N, H)
prev_c = np.random.randn(N, H)
Wx = np.random.randn(D, 4 * H)
Wh = np.random.randn(H, 4 * H)
b = np.random.randn(4 * H)
next_h, next_c, cache = lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)
dnext_h = np.random.randn(*next_h.shape)
dnext_c = np.random.randn(*next_c.shape)
fx_h = lambda x: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fh_h = lambda h: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fc_h = lambda c: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fWx_h = lambda Wx: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fWh_h = lambda Wh: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fb_h = lambda b: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fx_c = lambda x: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fh_c = lambda h: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fc_c = lambda c: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fWx_c = lambda Wx: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fWh_c = lambda Wh: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fb_c = lambda b: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
num_grad = eval_numerical_gradient_array
dx_num = num_grad(fx_h, x, dnext_h) + num_grad(fx_c, x, dnext_c)
dh_num = num_grad(fh_h, prev_h, dnext_h) + num_grad(fh_c, prev_h, dnext_c)
dc_num = num_grad(fc_h, prev_c, dnext_h) + num_grad(fc_c, prev_c, dnext_c)
dWx_num = num_grad(fWx_h, Wx, dnext_h) + num_grad(fWx_c, Wx, dnext_c)
dWh_num = num_grad(fWh_h, Wh, dnext_h) + num_grad(fWh_c, Wh, dnext_c)
db_num = num_grad(fb_h, b, dnext_h) + num_grad(fb_c, b, dnext_c)
dx, dh, dc, dWx, dWh, db = lstm_step_backward(dnext_h, dnext_c, cache)
print 'dx error: ', rel_error(dx_num, dx)
print 'dh error: ', rel_error(dh_num, dh)
print 'dc error: ', rel_error(dc_num, dc)
print 'dWx error: ', rel_error(dWx_num, dWx)
print 'dWh error: ', rel_error(dWh_num, dWh)
print 'db error: ', rel_error(db_num, db)
N, D, H, T = 2, 5, 4, 3
x = np.linspace(-0.4, 0.6, num=N*T*D).reshape(N, T, D)
h0 = np.linspace(-0.4, 0.8, num=N*H).reshape(N, H)
Wx = np.linspace(-0.2, 0.9, num=4*D*H).reshape(D, 4 * H)
Wh = np.linspace(-0.3, 0.6, num=4*H*H).reshape(H, 4 * H)
b = np.linspace(0.2, 0.7, num=4*H)
h, cache = lstm_forward(x, h0, Wx, Wh, b)
expected_h = np.asarray([
[[ 0.01764008, 0.01823233, 0.01882671, 0.0194232 ],
[ 0.11287491, 0.12146228, 0.13018446, 0.13902939],
[ 0.31358768, 0.33338627, 0.35304453, 0.37250975]],
[[ 0.45767879, 0.4761092, 0.4936887, 0.51041945],
[ 0.6704845, 0.69350089, 0.71486014, 0.7346449 ],
[ 0.81733511, 0.83677871, 0.85403753, 0.86935314]]])
print 'h error: ', rel_error(expected_h, h)
from cs231n.rnn_layers import lstm_forward, lstm_backward
N, D, T, H = 2, 3, 10, 6
x = np.random.randn(N, T, D)
h0 = np.random.randn(N, H)
Wx = np.random.randn(D, 4 * H)
Wh = np.random.randn(H, 4 * H)
b = np.random.randn(4 * H)
out, cache = lstm_forward(x, h0, Wx, Wh, b)
dout = np.random.randn(*out.shape)
dx, dh0, dWx, dWh, db = lstm_backward(dout, cache)
fx = lambda x: lstm_forward(x, h0, Wx, Wh, b)[0]
fh0 = lambda h0: lstm_forward(x, h0, Wx, Wh, b)[0]
fWx = lambda Wx: lstm_forward(x, h0, Wx, Wh, b)[0]
fWh = lambda Wh: lstm_forward(x, h0, Wx, Wh, b)[0]
fb = lambda b: lstm_forward(x, h0, Wx, Wh, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
dh0_num = eval_numerical_gradient_array(fh0, h0, dout)
dWx_num = eval_numerical_gradient_array(fWx, Wx, dout)
dWh_num = eval_numerical_gradient_array(fWh, Wh, dout)
db_num = eval_numerical_gradient_array(fb, b, dout)
print 'dx error: ', rel_error(dx_num, dx)
print 'dh0 error: ', rel_error(dx_num, dx)
print 'dWx error: ', rel_error(dx_num, dx)
print 'dWh error: ', rel_error(dx_num, dx)
print 'db error: ', rel_error(dx_num, dx)
N, D, W, H = 10, 20, 30, 40
word_to_idx = {'<NULL>': 0, 'cat': 2, 'dog': 3}
V = len(word_to_idx)
T = 13
model = CaptioningRNN(word_to_idx,
input_dim=D,
wordvec_dim=W,
hidden_dim=H,
cell_type='lstm',
dtype=np.float64)
# Set all model parameters to fixed values
for k, v in model.params.iteritems():
model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)
features = np.linspace(-0.5, 1.7, num=N*D).reshape(N, D)
captions = (np.arange(N * T) % V).reshape(N, T)
loss, grads = model.loss(features, captions)
expected_loss = 9.82445935443
print 'loss: ', loss
print 'expected loss: ', expected_loss
print 'difference: ', abs(loss - expected_loss)
small_data = load_coco_data(max_train=50)
small_lstm_model = CaptioningRNN(
cell_type='lstm',
word_to_idx=data['word_to_idx'],
input_dim=data['train_features'].shape[1],
hidden_dim=512,
wordvec_dim=256,
dtype=np.float32,
)
small_lstm_solver = CaptioningSolver(small_lstm_model, small_data,
update_rule='adam',
num_epochs=50,
batch_size=25,
optim_config={
'learning_rate': 5e-3,
},
lr_decay=0.995,
verbose=True, print_every=10,
)
small_lstm_solver.train()
# Plot the training losses
plt.plot(small_lstm_solver.loss_history)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
plt.show()
for split in ['train', 'val']:
minibatch = sample_coco_minibatch(small_data, split=split, batch_size=2)
gt_captions, features, urls = minibatch
gt_captions = decode_captions(gt_captions, data['idx_to_word'])
sample_captions = small_lstm_model.sample(features)
sample_captions = decode_captions(sample_captions, data['idx_to_word'])
for gt_caption, sample_caption, url in zip(gt_captions, sample_captions, urls):
plt.imshow(image_from_url(url))
plt.title('%s\n%s\nGT:%s' % (split, sample_caption, gt_caption))
plt.axis('off')
plt.show()
pass
pass
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Captioning with LSTMs
Step2: Load MS-COCO data
Step3: LSTM
Step4: LSTM
Step5: LSTM
Step6: LSTM
Step7: LSTM captioning model
Step8: Overfit LSTM captioning model
Step9: LSTM test-time sampling
Step10: Train a good captioning model!
|
7,805
|
<ASSISTANT_TASK:>
Python Code:
from dolfin import *
from rbnics import *
@PullBackFormsToReferenceDomain()
@ShapeParametrization(
    ("x[0]", "x[1]"),  # subdomain 1
    ("mu[0] * (x[0] - 1) + 1", "x[1]"),  # subdomain 2
)
class EllipticOptimalControl(EllipticOptimalControlProblem):
    """Parametrized elliptic optimal control problem on a two-subdomain
    geometry (mu[0] stretches subdomain 2; mu[1] scales the desired-state
    terms). State y, control u and adjoint p live in the mixed space V."""

    # Default initialization of members
    def __init__(self, V, **kwargs):
        # Call the standard initialization
        EllipticOptimalControlProblem.__init__(self, V, **kwargs)
        # ... and also store FEniCS data structures for assembly
        assert "subdomains" in kwargs
        assert "boundaries" in kwargs
        self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"]
        yup = TrialFunction(V)
        (self.y, self.u, self.p) = split(yup)   # state, control, adjoint
        zvq = TestFunction(V)
        (self.z, self.v, self.q) = split(zvq)   # corresponding test functions
        # NOTE(review): these Measures capture the module-level `subdomains`
        # and `boundaries`, not the kwargs-stored self.* — confirm intended.
        self.dx = Measure("dx")(subdomain_data=subdomains)
        self.ds = Measure("ds")(subdomain_data=boundaries)
        # Regularization coefficient
        self.alpha = 0.01
        # Desired state
        self.y_d = Constant(1.0)
        # Customize linear solver parameters
        self._linear_solver_parameters.update({
            "linear_solver": "mumps"
        })

    # Return custom problem name
    def name(self):
        return "EllipticOptimalControl1RB"

    # Return stability factor
    def get_stability_factor_lower_bound(self):
        return 1.

    # Return theta multiplicative terms of the affine expansion of the problem.
    def compute_theta(self, term):
        """Parameter-dependent coefficients for each affine operator term."""
        mu = self.mu
        if term in ("a", "a*"):
            theta_a0 = 1.0
            return (theta_a0,)
        elif term in ("c", "c*"):
            theta_c0 = 1.0
            return (theta_c0,)
        elif term == "m":
            theta_m0 = 1.0
            return (theta_m0,)
        elif term == "n":
            # Control penalization weighted by the regularization coefficient.
            theta_n0 = self.alpha
            return (theta_n0,)
        elif term == "f":
            theta_f0 = 1.0
            return (theta_f0,)
        elif term == "g":
            # Desired-state term: mu[1] scales the subdomain-2 contribution.
            theta_g0 = 1.0
            theta_g1 = mu[1]
            return (theta_g0, theta_g1)
        elif term == "h":
            # Constant part of the cost functional; quadratic in mu[1].
            theta_h0 = 1.0
            theta_h1 = mu[1]**2
            return (theta_h0, theta_h1)
        elif term == "dirichlet_bc_y":
            theta_bc0 = 1.
            return (theta_bc0,)
        else:
            raise ValueError("Invalid term for compute_theta().")

    # Return forms resulting from the discretization of the affine expansion of the problem operators.
    def assemble_operator(self, term):
        """UFL forms (or BC lists) for each affine operator term."""
        dx = self.dx
        if term == "a":
            # State equation: Laplacian of y tested against q.
            y = self.y
            q = self.q
            a0 = inner(grad(y), grad(q)) * dx
            return (a0,)
        elif term == "a*":
            # Adjoint of "a".
            z = self.z
            p = self.p
            as0 = inner(grad(z), grad(p)) * dx
            return (as0,)
        elif term == "c":
            # Control-to-state coupling.
            u = self.u
            q = self.q
            c0 = u * q * dx
            return (c0,)
        elif term == "c*":
            v = self.v
            p = self.p
            cs0 = v * p * dx
            return (cs0,)
        elif term == "m":
            # State mass term of the cost functional.
            y = self.y
            z = self.z
            m0 = y * z * dx
            return (m0,)
        elif term == "n":
            # Control mass term (scaled by alpha via compute_theta).
            u = self.u
            v = self.v
            n0 = u * v * dx
            return (n0,)
        elif term == "f":
            # Zero forcing.
            q = self.q
            f0 = Constant(0.0) * q * dx
            return (f0,)
        elif term == "g":
            # Desired-state linear terms, split by subdomain.
            z = self.z
            y_d = self.y_d
            g0 = y_d * z * dx(1)
            g1 = y_d * z * dx(2)
            return (g0, g1)
        elif term == "h":
            # Constant cost terms; NOTE(review): relies on the module-level
            # `mesh` for the integration domain — confirm intended.
            y_d = self.y_d
            h0 = y_d * y_d * dx(1, domain=mesh)
            h1 = y_d * y_d * dx(2, domain=mesh)
            return (h0, h1)
        elif term == "dirichlet_bc_y":
            # Dirichlet BC y = 1 on boundary markers 1..8.
            bc0 = [DirichletBC(self.V.sub(0), Constant(1.0), self.boundaries, i) for i in range(1, 9)]
            return (bc0,)
        elif term == "dirichlet_bc_p":
            # Homogeneous Dirichlet BC for the adjoint on the same markers.
            bc0 = [DirichletBC(self.V.sub(2), Constant(0.0), self.boundaries, i) for i in range(1, 9)]
            return (bc0,)
        elif term == "inner_product_y":
            # H^1-seminorm inner products for state/adjoint, L^2 for control.
            y = self.y
            z = self.z
            x0 = inner(grad(y), grad(z)) * dx
            return (x0,)
        elif term == "inner_product_u":
            u = self.u
            v = self.v
            x0 = u * v * dx
            return (x0,)
        elif term == "inner_product_p":
            p = self.p
            q = self.q
            x0 = inner(grad(p), grad(q)) * dx
            return (x0,)
        else:
            raise ValueError("Invalid term for assemble_operator().")
mesh = Mesh("data/mesh1.xml")
subdomains = MeshFunction("size_t", mesh, "data/mesh1_physical_region.xml")
boundaries = MeshFunction("size_t", mesh, "data/mesh1_facet_region.xml")
scalar_element = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(scalar_element, scalar_element, scalar_element)
V = FunctionSpace(mesh, element, components=["y", "u", "p"])
problem = EllipticOptimalControl(V, subdomains=subdomains, boundaries=boundaries)
mu_range = [(1.0, 3.5), (0.5, 2.5)]
problem.set_mu_range(mu_range)
reduced_basis_method = ReducedBasis(problem)
reduced_basis_method.set_Nmax(20)
lifting_mu = (1.0, 1.0)
problem.set_mu(lifting_mu)
reduced_basis_method.initialize_training_set(100)
reduced_problem = reduced_basis_method.offline()
online_mu = (3.0, 0.6)
reduced_problem.set_mu(online_mu)
reduced_solution = reduced_problem.solve()
print("Reduced output for mu =", online_mu, "is", reduced_problem.compute_output())
plot(reduced_solution, reduced_problem=reduced_problem, component="y")
plot(reduced_solution, reduced_problem=reduced_problem, component="u")
plot(reduced_solution, reduced_problem=reduced_problem, component="p")
reduced_basis_method.initialize_testing_set(100)
reduced_basis_method.error_analysis()
reduced_basis_method.speedup_analysis()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Affine Decomposition
Step2: 4. Main program
Step3: 4.2. Create Finite Element space (Lagrange P1)
Step4: 4.3. Allocate an object of the EllipticOptimalControl class
Step5: 4.4. Prepare reduction with a reduced basis method
Step6: 4.5. Perform the offline phase
Step7: 4.6. Perform an online solve
Step8: 4.7. Perform an error analysis
Step9: 4.8. Perform a speedup analysis
|
7,806
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib
matplotlib.style.use('ggplot')
%matplotlib inline
# Tiny toy dataset: four (x, y) points for a simple regression example.
training_data = {
    'x': [0, 1, 2, 3],
    'y': [4, 7, 7, 8]
}
train_df = pd.DataFrame.from_dict(training_data)
# Bare expression: notebook cell displays the DataFrame.
train_df
# Scatter plot of y against x.
train_df.plot(kind='scatter', x='x', y='y')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's plot the data to see what it looks like
|
7,807
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import sys
sys.path.append("../lib/")
import seaborn as sns
import pandas as pd
from operator import itemgetter
from dataContainer import DataContainer, dataClassMapper
container = DataContainer() # load the data
data = container.collapse() # remove the country of origin information
data = container.orderColumns(data) # order the columns based on category, i.e. education, economy, etc.
nrows, ncol = np.shape(data)
keepColumns = list()
for column in data.columns:
if data[column].notnull().sum() >= nrows*0.5:
keepColumns.append(column)
dataReduced = data[keepColumns] # reduce the dataset
print("Selected %i out of %i indicators with not more than 50%% missing values" %(len(keepColumns), ncol))
allData = dict()
for name, group in dataReduced.groupby(["Country"]):
del group["Country"]
del group["Year"]
diffmat = group.diff()
diffmat[diffmat<=0] = -1
diffmat[diffmat >0] = 1
corrmat = diffmat.corr()
allData[name] = corrmat
pan = pd.Panel(allData)
m = pan.mean(axis=0) # the mean of the correlation across countries
s = pan.std( axis=0) # the standard deviation of the correlation across countries
mapper = dataClassMapper() # the dataClassMapper allows mapping of short indicator IDs to human readible output
labels = list() # will contain for each position the category of its indicator
for idx, name in enumerate(m.columns):
labels.append((idx, mapper(name)[0]))
df = pd.DataFrame(labels) # convert to DataFrame to allow grouping
category_ticks = list() # will contain for each category the starting and ending index
for category, group in df.groupby(1):
category_ticks.append((category, np.min(group[0]), np.max(group[0])))
category_ticks = sorted(category_ticks, key=itemgetter(1)) # sort based on the starting index
placeTicksHere = [ i[1]+np.floor((i[2] -i[1])/2) for i in category_ticks ] # calculate the tick position
ticks = list() # the axes tick labels
count = 0
for i in range(len(labels)):
if i in placeTicksHere:
ticks.append(category_ticks[count][0]) # append the category name
count += 1
else:
ticks.append("") # nothing to show
f, ax = plt.subplots(figsize=(12, 9))
ax = sns.heatmap(m, vmax=.8, square=True, xticklabels=ticks, yticklabels=ticks, ax=ax)
linesHere = [ item[2] + 1 for item in category_ticks ]
for i in linesHere:
ax.axhline(np.shape(s)[0]-i, c="grey")
ax.axvline(i, c="grey")
f, ax = plt.subplots(figsize=(12, 9))
ax = sns.heatmap(s, vmin=.3, vmax=.6, square=True, xticklabels=ticks, yticklabels=ticks, ax=ax, cmap="Blues")
linesHere = [ item[2] + 1 for item in category_ticks ]
for i in linesHere:
ax.axhline(np.shape(s)[0]-i, c="grey")
ax.axvline(i, c="grey")
# Save the last figure
import time
fname = "%i%i%i_std_CorrMatrixGlobal.png" %(time.localtime()[0], time.localtime()[1], time.localtime()[2])
f.tight_layout()
f.savefig(fname, dpi=120)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The DataContainer class is a container that integrates the available datasets. It allows easy loading and combining (and hopefully more cool stuff in the future). The UNHCR and OECD migration data includes information about the country of origin of the migration. For now this information is not needed and we can combine the data using the collapse() method.
Step2: To get a good sense of the data without being totaly overwhelmed by all the missing values we will drop every indicator which has more than 50% missing values. This typically happens if the indicator was introduced recently and no information about the past is available, or if this particular information is difficult to collect.
Step3: This reduced data can now be grouped by country and the correlation matrix can be calculated. Notice that the correlation will be calculated on the (binary) difference matrix. If an indicator was increasing compared to the year before it will be set to +1, if it decreased it will be set to -1.
Step4: Here we created a dictionary that holds the correlation matrix for each country. One easy way to obtain an average correlation matrix from this is to use pandas Panels. Once put into the panel object we can easily compute the average and standard deviation across the DataFrames.
Step5: In order to produce a nice plot we need to do some tricks. The indicators are grouped by categories (see the wiki for more information). Guides in the following heatmap should be plotted to indicate these different groups. Assuming that the categorisation of the indicators was done sensibly, stronger correlation between indicators within one group compared to inter-group correlations should be observed. The next few steps can safely be ignored. They are simply there to add the lines and labels to the plot.
Step6: Finally we can plot the average correlation between the indicators over all countries.
Step7: What do we see in this plot?
Step8: What do we see in this plot?
|
7,808
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
assert float(tf.__version__[:3]) >= 2.3
from zipfile import ZipFile
import os
uri = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip"
zip_path = keras.utils.get_file(origin=uri, fname="jena_climate_2009_2016.csv.zip")
zip_file = ZipFile(zip_path)
zip_file.extractall()
csv_path = "jena_climate_2009_2016.csv"
df = pd.read_csv(csv_path)
titles = [
"Pressure",
"Temperature",
"Temperature in Kelvin",
"Temperature (dew point)",
"Relative Humidity",
"Saturation vapor pressure",
"Vapor pressure",
"Vapor pressure deficit",
"Specific humidity",
"Water vapor concentration",
"Airtight",
"Wind speed",
"Maximum wind speed",
"Wind direction in degrees",
]
feature_keys = [
"p (mbar)",
"T (degC)",
"Tpot (K)",
"Tdew (degC)",
"rh (%)",
"VPmax (mbar)",
"VPact (mbar)",
"VPdef (mbar)",
"sh (g/kg)",
"H2OC (mmol/mol)",
"rho (g/m**3)",
"wv (m/s)",
"max. wv (m/s)",
"wd (deg)",
]
colors = [
"blue",
"orange",
"green",
"red",
"purple",
"brown",
"pink",
"gray",
"olive",
"cyan",
]
date_time_key = "Date Time"
def show_raw_visualization(data):
time_data = data[date_time_key]
fig, axes = plt.subplots(
nrows=7, ncols=2, figsize=(15, 20), dpi=80, facecolor="w", edgecolor="k"
)
for i in range(len(feature_keys)):
key = feature_keys[i]
c = colors[i % (len(colors))]
t_data = data[key]
t_data.index = time_data
t_data.head()
ax = t_data.plot(
ax=axes[i // 2, i % 2],
color=c,
title="{} - {}".format(titles[i], key),
rot=25,
)
ax.legend([titles[i]])
plt.tight_layout()
show_raw_visualization(df)
def show_heatmap(data):
plt.matshow(data.corr())
plt.xticks(range(data.shape[1]), data.columns, fontsize=14, rotation=90)
plt.gca().xaxis.tick_bottom()
plt.yticks(range(data.shape[1]), data.columns, fontsize=14)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
plt.title("Feature Correlation Heatmap", fontsize=14)
plt.show()
show_heatmap(df)
# Fraction of the rows used for training; the remainder is validation.
split_fraction = 0.715
train_split = int(split_fraction * int(df.shape[0]))
# Keep every 6th record when windowing (per the notebook text past/step = 120
# "hours", so step presumably converts 10-minute samples to hourly — confirm).
step = 6
# Number of raw records of history fed to the model per window.
past = 720
# Offset (in raw records) of the prediction target ahead of the window.
future = 72
learning_rate = 0.001
batch_size = 256
def normalize(data, train_split):
    """Standardize *data* using mean/std computed on the training rows only.

    Statistics come from data[:train_split] so the validation portion does
    not leak into the normalization.
    """
    train_portion = data[:train_split]
    mu = train_portion.mean(axis=0)
    sigma = train_portion.std(axis=0)
    return (data - mu) / sigma
print(
"The selected parameters are:",
", ".join([titles[i] for i in [0, 1, 5, 7, 8, 10, 11]]),
)
selected_features = [feature_keys[i] for i in [0, 1, 5, 7, 8, 10, 11]]
features = df[selected_features]
features.index = df[date_time_key]
features.head()
features = normalize(features.values, train_split)
features = pd.DataFrame(features)
features.head()
train_data = features.loc[0 : train_split - 1]
val_data = features.loc[train_split:]
start = past + future
end = start + train_split
x_train = train_data[[i for i in range(7)]].values
y_train = features.iloc[start:end][[1]]
print('training size:', len(x_train))
sequence_length = int(past / step)
print('window size:', sequence_length)
dataset_train = keras.preprocessing.timeseries_dataset_from_array(
x_train,
y_train,
sequence_length=sequence_length,
sampling_rate=step,
batch_size=batch_size,
)
x_end = len(val_data) - past - future
label_start = train_split + past + future
x_val = val_data.iloc[:x_end][[i for i in range(7)]].values
y_val = features.iloc[label_start:][[1]]
dataset_val = keras.preprocessing.timeseries_dataset_from_array(
x_val,
y_val,
sequence_length=sequence_length,
sampling_rate=step,
batch_size=batch_size,
)
for batch in dataset_train.take(1):
inputs, targets = batch
print("Input shape:", inputs.numpy().shape)
print("Target shape:", targets.numpy().shape)
inputs = keras.layers.Input(shape=(inputs.shape[1], inputs.shape[2]))
lstm_out = keras.layers.LSTM(32)(inputs)
outputs = keras.layers.Dense(1)(lstm_out)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss='mse')
model.summary()
epochs = 10
path_checkpoint = "model_checkpoint.h5"
es_callback = keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=5)
modelckpt_callback = keras.callbacks.ModelCheckpoint(
monitor="val_loss",
filepath=path_checkpoint,
verbose=1,
save_weights_only=True,
save_best_only=True,
)
history = model.fit(
dataset_train,
epochs=epochs,
validation_data=dataset_val,
callbacks=[es_callback, modelckpt_callback],
)
def visualize_loss(history, title):
    """Plot training vs. validation loss curves from a Keras History object."""
    loss = history.history["loss"]
    val_loss = history.history["val_loss"]
    epochs = range(len(loss))
    plt.figure()
    plt.plot(epochs, loss, "b", label="Training loss")
    plt.plot(epochs, val_loss, "r", label="Validation loss")
    plt.title(title)
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
    # NOTE(review): this prints the whole per-epoch validation-loss list, not
    # a single number — presumably val_loss[-1] was intended; confirm.
    print("Final val loss: ", val_loss)
visualize_loss(history, "Training and Validation Loss")
dataset_test = keras.preprocessing.timeseries_dataset_from_array(
x_val,
y_val,
sequence_length=sequence_length,
sequence_stride=int(sequence_length * 6),
sampling_rate=step,
batch_size=1,
)
def show_plot(plot_data, delta, title):
labels = ["History", "True Future", "Model Prediction"]
marker = [".-", "rx", "go"]
time_steps = list(range(-(plot_data[0].shape[0]), 0))
if delta:
future = delta
else:
future = 0
plt.title(title)
for i, val in enumerate(plot_data):
if i:
plt.plot(future, plot_data[i], marker[i], markersize=10, label=labels[i])
else:
plt.plot(time_steps, plot_data[i].flatten(), marker[i], label=labels[i])
plt.legend()
plt.xlim([time_steps[0], (future + 5) * 2])
plt.xlabel("Time-Step")
plt.show()
return
for x, y in dataset_test.take(5):
prediction = model.predict(x)
prediction = prediction[0]
print('prediction:', prediction)
print('truth:', y[0].numpy())
show_plot(
[x[0][:, 1].numpy(), y[0], prediction],
12,
"Single Step Prediction",
)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('weather_forecast.tflite', 'wb') as f:
f.write(tflite_model)
batch_size = 1
model.input.set_shape((batch_size,) + model.input.shape[1:])
model.summary()
# Our representative dataset is the same as the training dataset,
# but the batch size must now be 1
dataset_repr = keras.preprocessing.timeseries_dataset_from_array(
x_train,
y_train,
sequence_length=sequence_length,
sampling_rate=step,
batch_size=batch_size,
)
def representative_data_gen():
# To ensure full coverage of possible inputs, we use the whole train set
for input_data, _ in dataset_repr.take(int(len(x_train))):
input_data = tf.cast(input_data, dtype=tf.float32)
yield [input_data]
converter = tf.lite.TFLiteConverter.from_keras_model(model)
# This enables quantization
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# This sets the representative dataset for quantization
converter.representative_dataset = representative_data_gen
# This ensures that if any ops can't be quantized, the converter throws an error
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# For full integer quantization, though supported types defaults to int8 only, we explicitly declare it for clarity
converter.target_spec.supported_types = [tf.int8]
# These set the input and output tensors to int8
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model_quant = converter.convert()
with open('weather_forecast_quant.tflite', 'wb') as f:
f.write(tflite_model_quant)
import numpy as np
def set_input_tensor(interpreter, input):
input_details = interpreter.get_input_details()[0]
tensor_index = input_details['index']
input_tensor = interpreter.tensor(tensor_index)()
# Inputs for the TFLite model must be uint8, so we quantize our input data.
scale, zero_point = input_details['quantization']
quantized_input = np.uint8(input / scale + zero_point)
input_tensor[:, :, :] = quantized_input
def predict_weather(interpreter, input):
set_input_tensor(interpreter, input)
interpreter.invoke()
output_details = interpreter.get_output_details()[0]
output = interpreter.get_tensor(output_details['index'])
# Outputs from the TFLite model are uint8, so we dequantize the results:
scale, zero_point = output_details['quantization']
output = scale * (output - zero_point)
return output
interpreter = tf.lite.Interpreter('weather_forecast_quant.tflite')
interpreter.allocate_tensors()
for x, y in dataset_test.take(5):
prediction = predict_weather(interpreter, x)
print('prediction:', prediction[0])
print('truth:', y[0].numpy())
show_plot(
[x[0][:, 1].numpy(), y[0], prediction[0]],
12,
"Single Step Prediction (TF Lite)",
)
! curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
! echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
! sudo apt-get update
! sudo apt-get install edgetpu-compiler
! edgetpu_compiler weather_forecast_quant.tflite
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train an LSTM weather forecasting model for the Coral Edge TPU
Step2: Prepare the climate dataset
Step3: Visualize the data
Step4: This heat map shows the correlation between different features.
Step5: Preprocess the dataset
Step6: We can see from the correlation heatmap, few parameters like Relative Humidity and
Step7: Split the training and validation dataset
Step8: The timeseries_dataset_from_array function takes in a sequence of data-points gathered at
Step9: The validation dataset must not contain the last 792 rows as we won't have label data for
Step10: The input shape is the above-defined batch_size, the sequence length (120 hours), and the 7 selected features.
Step11: We'll use the ModelCheckpoint callback to regularly save checkpoints, and
Step12: We can visualize the loss with the function below. After one point, the loss stops
Step13: Plot some predictions
Step14: Convert to TF Lite
Step15: That gives us a basic TF Lite version, which can be useful for benchmarks, but we need it fully-quantized for compatibility with the Edge TPU...
Step16: This part takes several minutes due to the size of the representative dataset
Step17: Try some TF Lite predictions
Step18: If you compare these predictions from the quantized model to those we got from the float Keras model above, they're not very different.
|
7,809
|
<ASSISTANT_TASK:>
Python Code:
# Show matplotlib plots inline (nicely formatted in the notebook)
%matplotlib inline
# Import libraries necessary for this project
import numpy as np
import pandas as pd
import renders as rs
import seaborn as sns
from matplotlib import pylab as plt
from IPython.display import display # Allows the use of display() for DataFrames
# Load the wholesale customers dataset
try:
data = pd.read_csv("customers.csv")
data.drop(['Region', 'Channel'], axis = 1, inplace = True)
print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
print "Dataset could not be loaded. Is the dataset missing?"
# Display a description of the dataset
display(data.describe())
# TODO: Select three indices of your choice you wish to sample from the dataset
indices = [13,120,390]
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
print "Mean Differences"
display(samples - np.round(data.mean()))
print "Median Differences"
display(samples - np.round(data.median()))
# TODO: Make a copy of the DataFrame, using the 'drop' function to drop the given feature
new_data = data.drop('Fresh',axis=1)
# TODO: Split the data into training and testing sets using the given feature as the target
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(new_data,data['Fresh'],test_size=0.25,random_state=42)
# TODO: Create a decision tree regressor and fit it to the training set
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state=42)
# TODO: Report the score of the prediction using the testing set
regressor.fit(X_train,y_train)
score = regressor.score(X_test,y_test)
print score
# Produce a scatter matrix for each pair of features in the data
pd.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
import seaborn as sns
import pylab as plt
corr = data.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask, 1)] = True
with sns.axes_style("white"):
ax = sns.heatmap(corr, mask=mask, square=True, annot=True, cmap='RdBu_r')
plt.xticks(rotation=45, ha='center');
# TODO: Scale the data using the natural logarithm
log_data = np.log(data)
# TODO: Scale the sample data using the natural logarithm
log_samples = np.log(samples)
# Produce a scatter matrix for each pair of newly-transformed features
pd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# Display the log-transformed sample data
display(log_samples)
# For each feature find the data points with extreme high or low values
for feature in log_data.keys():
# TODO: Calculate Q1 (25th percentile of the data) for the given feature
Q1 = np.percentile(log_data[feature],q=25)
# TODO: Calculate Q3 (75th percentile of the data) for the given feature
Q3 = np.percentile(log_data[feature],q=75)
# TODO: Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)
step = (Q3-Q1)*1.5
# Display the outliers
print "Data points considered outliers for the feature '{}':".format(feature)
display(log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))])
# OPTIONAL: Select the indices for data points you wish to remove [66,95,75,109,128,142,187,218,338]
outliers = [val for sublist in
[log_data[~((log_data[feature] >= np.percentile(log_data[feature],q=25) -
((np.percentile(log_data[feature],q=75) -
np.percentile(log_data[feature],q=25))*1.5)) &
(log_data[feature] <= np.percentile(log_data[feature],q=75) +
((np.percentile(log_data[feature],q=75) -
np.percentile(log_data[feature],q=25))*1.5)))].index.values
for feature in log_data.keys()] for val in sublist]
outliers = list(set([x for x in outliers if outliers.count(x)>1]))
print outliers
# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
# TODO: Apply PCA to the good data with the same number of dimensions as features
from sklearn.decomposition import PCA
pca = PCA()
pca.fit(good_data)
# TODO: Apply a PCA transformation to the sample log-data
pca_samples = pca.transform(log_samples)
# Generate PCA results plot
pca_results = rs.pca_results(good_data, pca)
# Display sample log-data after having a PCA transformation applied
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
# TODO: Fit PCA to the good data using only two dimensions
pca = PCA(n_components=2)
pca.fit(good_data)
# TODO: Apply a PCA transformation the good data
reduced_data = pca.transform(good_data)
# TODO: Apply a PCA transformation to the sample log-data
pca_samples = pca.transform(log_samples)
# Create a DataFrame for the reduced data
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
# Display sample log-data after applying PCA transformation in two dimensions
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
# TODO: Apply your clustering algorithm of choice to the reduced data
from sklearn.mixture import GMM
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
for i in range (6,1,-1):
clusterer = GMM(n_components = i,random_state=42)
clusterer.fit(reduced_data)
# TODO: Predict the cluster for each data point
preds = clusterer.predict(reduced_data)
# TODO: Find the cluster centers
centers = clusterer.means_
# TODO: Predict the cluster for each transformed sample data point
sample_preds = clusterer.predict(pca_samples)
# TODO: Calculate the mean silhouette coefficient for the number of clusters chosen
score = silhouette_score(reduced_data,preds)
print "clusters = {} : score = {:.3f}\n".format(i,score),
# Display the results of the clustering from implementation
rs.cluster_results(reduced_data, preds, centers, pca_samples)
# TODO: Inverse transform the centers
log_centers = pca.inverse_transform(centers)
# TODO: Exponentiate the centers
true_centers = np.exp(log_centers)
# Display the true centers
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
print "Mean Difference"
display(true_centers - np.round(data.mean()))
print "Median Differences"
display((true_centers - np.round(data.median())))
print "N-stds"
display((true_centers - np.round(data.mean()))/data.std())
# Display the predictions
for i, pred in enumerate(sample_preds):
print "Sample point", i, "predicted to be in Cluster", pred
# Display the clustering results based on 'Channel' data
rs.channel_results(reduced_data, outliers, pca_samples)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Exploration
Step2: Implementation
Step3: Question 1
Step4: Answer
Step5: Question 2
Step6: Question 3
Step7: Answer
Step8: Observation
Step9: Implementation
Step10: Question 4
Step11: Question 5
Step12: Implementation
Step13: Observation
Step14: Question 6
Step15: Question 7
Step16: Implementation
Step17: Question 8
Step18: Answer
Step19: Answer
|
7,810
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import numpy as np
from PIL import Image # for bmp import
from glob import glob
from scipy.misc import imresize
import matplotlib.pyplot as plt
import math
import time
%matplotlib inline
def showImage(imageToPlot):
    """Render a 2-D array as a grayscale image in the notebook."""
    plt.figure(figsize=(2, 4))
    plt.gray()
    # reshape to the array's own shape is a no-op kept from the original code
    plt.imshow(imageToPlot.reshape(imageToPlot.shape), aspect='auto')
    plt.show()
def normImag(A):
    """Rescale the image so its values span the full [0, 1] range."""
    shifted = A - A.min()
    return 1.0 * shifted / shifted.max()
im = Image.open("fantom.bmp").convert('L') # convert 'L' is to get a flat image, not RGB
groundTruth = normImag(np.array(im)) # we use the full [0;1] range
showImage(groundTruth)
depth = 0.13 # in meters
resolution = groundTruth.shape[0]/depth # in pts/m
t = depth*2.0/1450.0
print('Image resolution in pixels/mm : ', resolution/1000.0)
print('Listening time in micro-secs : ', t*1.0E6)
sps = 60.0E6
f = 3.5E6
L = int(t*sps)
print("Number of points in raw signal : ", L)
# First create a table of L points for each line, from the original image, by using bicubic interpolation
# This is to get a smoother and more realistic raw signal
BigImg = imresize(groundTruth, ( L,groundTruth.shape[1]), interp='bicubic')
# Then simulate raw signal by modulating the data of BigImg with a sinusoidal function,
# the frequence of which corresponds to the piezo frequency
rawSignal = np.zeros(shape=(L,groundTruth.shape[1]))
for i in range(len(rawSignal)):
for j in range(len(rawSignal[0])):
pixelValue = 1.0*BigImg[i][j]
w = 2.0*math.radians(180)*f
rawSignal[i][j] = pixelValue*math.cos(1.0*i*w/sps)
line = np.zeros(shape=(L))
imageLine = np.zeros(shape=(L))
for i in range(len(rawSignal)):
line[i] = rawSignal[i][10]
imageLine[i] = BigImg[i][10]
plt.plot(line)
plt.plot(imageLine)
plt.show()
maxFreq = 6.0E6
xLimit = int(L*maxFreq/sps) # upper cap to
lineFFT = np.abs(np.fft.fft(line))
xScale = range(xLimit)
for i in range(xLimit):
xScale[i] = (60.0E6)*float(xScale[i])/(L*(1.0E6))
plt.plot(xScale,lineFFT[0:xLimit])
plt.xlabel('Frequency (MHz)')
plt.show()
# Let's save the raw signal data
np.savetxt("RawSignal.csv.gz",rawSignal, delimiter=';')
def ssd(A, B):
    """Sum of squared differences between two images.

    Each image is first shifted by 95% of its minimum and scaled by its
    (shifted) maximum, so the comparison is insensitive to overall gain.
    """
    def _rescale(img):
        shifted = img - 0.95 * img.min()
        return 1.0 * shifted / shifted.max()

    diff = _rescale(A)[:, :] - _rescale(B)[:, :]
    return np.sum(diff ** 2)
def estimateScore(groundTruth, reconstructedImage) :
    """Display the error map and return [ssd score, max pixel error].

    Relies on the sibling helpers ``ssd`` and ``showImage`` defined above.
    """
    errorMap = (groundTruth - reconstructedImage)
    print('Error map between ground truth and reconstructed image : ')
    showImage(errorMap)
    # Lower SSD = closer match; ssd() re-normalizes both images internally.
    score = ssd(reconstructedImage,groundTruth)
    maxErr = errorMap.max()
    return [score,maxErr]
def compareImages(im1,im2) :
plt.figure()
ax = plt.subplot(1, 2, 1)
plt.imshow(im1)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(1, 2, 2)
plt.imshow(im2)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def reconstructBaseline(rawSignal,image_shape) :
    """Baseline reconstruction: rectify-and-sum the raw signal per pixel row.

    Every raw sample is mapped to an output row via the decimation factor and
    its absolute value accumulated there; no envelope detection is performed.
    """
    reconstructedImage = np.zeros(shape=(image_shape[0],image_shape[1]))
    # Number of raw samples that fall into one output pixel row.
    decimationFactor = 1.0*rawSignal.shape[0]/image_shape[0]
    for i in range(rawSignal.shape[0]):
        for j in range(image_shape[1]):
            reconstructedImage[int(i/decimationFactor)][j] += np.abs(rawSignal[i][j])
    # normImag (defined earlier in this file) rescales to [0, 1].
    reconstructedImage = normImag(np.abs(reconstructedImage))
    return reconstructedImage
reconBaseline = reconstructBaseline(rawSignal,groundTruth.shape)
compareImages(groundTruth, reconBaseline)
[scoreBaseline,maxErrBaseline] = estimateScore(groundTruth, reconBaseline)
print('Score for Baseline method : ', scoreBaseline)
print('max Err between pixels for Baseline method : ', maxErrBaseline)
def reconstructImage(rawSignal,image_shape) :
    """Placeholder reconstruction method (currently a copy of the baseline).

    Challenge entry point: replace this body with an improved method; it must
    return an image of shape ``image_shape`` normalized to [0, 1].
    """
    # Here is a copy of the baseline method. Replace that by another method.
    reconstructedImage = np.zeros(shape=(image_shape[0],image_shape[1]))
    decimationFactor = 1.0*rawSignal.shape[0]/image_shape[0]
    for i in range(rawSignal.shape[0]):
        for j in range(image_shape[1]):
            reconstructedImage[int(i/decimationFactor)][j] += np.abs(rawSignal[i][j])
    reconstructedImage = normImag(np.abs(reconstructedImage))
    # The function should return the reconstructed image
    return reconstructedImage
recon = reconstructImage(rawSignal,groundTruth.shape)
compareImages(groundTruth, recon)
[score,maxErr] = estimateScore(groundTruth, recon)
print('Score for your method : ', score)
print('max Err between pixels for your method : ', maxErr)
def install_packages():
    """Install the scipy dependency needed by run().

    Uses ``python -m pip`` in a subprocess instead of ``pip.main()``, which
    was removed from pip's public API in pip 10 and crashes on modern pip.
    """
    import subprocess
    import sys
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'scipy'])
def run(rawSignal, image_shape):
    """Reconstruct an image from raw ultrasound data via envelope detection.

    The envelope is the magnitude of the analytic (Hilbert) signal, computed
    along the time/depth axis; envelope samples are then averaged into each
    output pixel row and the result is normalized to [0, 1].
    """
    import numpy as np
    from scipy.signal import hilbert

    # The raw signal runs along axis 0 (one column per scan line), so the
    # analytic signal must be computed with axis=0 — scipy's default is the
    # last axis, which would filter across scan lines instead of along time.
    amplitude_envelope = np.abs(hilbert(rawSignal, axis=0))

    reconstructedImage = np.zeros(shape=(image_shape[0], image_shape[1]))
    counts = np.zeros(image_shape[0])
    decimationFactor = 1.0 * amplitude_envelope.shape[0] / image_shape[0]
    for i in range(amplitude_envelope.shape[0]):
        row = int(i / decimationFactor)
        reconstructedImage[row] += amplitude_envelope[i]
        counts[row] += 1
    # Average each pixel row by the number of samples it received (the
    # original loop never divided the final row); guard against empty rows.
    reconstructedImage /= np.maximum(counts, 1)[:, np.newaxis]
    # normImag (defined earlier in this file) rescales to [0, 1].
    return normImag(reconstructedImage)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading and studying the 342x719 image of fantom
Step2: Let's assume vertical line points are spaced by 1cm each. This corresponds to a depth of about 13cm.
Step3: The corresponding resolution is 5.53 pts / mm. At a speed of 1450m/s for sound, we'd have a listening time of around 180µs of recording.
Step4: The corresponding length of raw signal is close to 11k points.
Step5: Let's check that we have the image (in green) and the corresponding signal (in blue)
Step6: Let's analyse this signal in the frequency domain, through a FFT. We should see the image, modulated by the 3.5MHz. That is, a "potato" around a 3.5MHz peak
Step7: Conclusion
Step8: Envelope detection challenge
Step9: Baseline method (don't change this one ! )
Step10: Let's compare the image reconstructed with the baseline method, with the ground truth to achieve
Step11: Your turn
Step12: Performance assessment of your method
Step13: Submitting your own method to the leaderboard
|
7,811
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os, sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import requests
import StringIO
# set matplotlib style
matplotlib.style.use('ggplot')
sitename = 'alligatorriver'
roiname = 'DB_0001'
infile = "{}_{}_roistats.csv".format(sitename, roiname)
print infile
with open(infile,'r') as fd:
df = pd.read_csv(fd, comment='#', parse_dates=[[0,1]])
df.head()
x = df.date_local_std_time
y = df.gcc
plt.figure(figsize=[16,4])
plt.plot_date(x, y, 'k.', markersize=.5)
url = "https://phenocam.sr.unh.edu/data/archive/{}/ROI/{}"
url = url.format(sitename, infile)
print url
response = requests.get(url)
fd = StringIO.StringIO(response.text)
df = pd.read_csv(fd, comment='#', parse_dates=[[0,1]])
fd.close
df[0:3]
x = df.date_local_std_time
y = df.gcc
plt.figure(figsize=[16,4])
plt.plot_date(x, y, 'k.', markersize=.5)
df.index = df.date_local_std_time
df.gcc.plot(style='k.', markersize=.5, figsize=[16,4])
df.rcc.plot(style='r.', markersize=.5, figsize=[16,4])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: While the data can be read directly from a URL we'll start by doing the simple thing of reading the CSV file directly from our local disk.
Step2: Okay, that worked. The date times seem to be a problem for plotting in pandas so I'm going to drop back to using matplotlib directly.
Step3: That was pretty simple. Now try to read directly from a URL to see if we get the same result. This has the advantage that you always get the latest version of the file which is updated nightly.
Step4: I finally figured out how to plot this using pandas. It's pretty simple once you know what you need to do.
Step5: Once the data frame is indexed with the datetime other values can also be plotted as a function of time.
|
7,812
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import numpy as np
policy = np.array([[0.3, 0.2, 0.5], [0.5, 0.4, 0.1], [0.8, 0.1, 0.1]])
print("This is represents the policy with 3 states and 3 actions p(row=a|col=s):\n", np.matrix(policy))
# 'raw_rewards' variable contains rewards obtained after transition to each state
# In our example it doesn't depend on source state
raw_rewards = np.array([1.5, -1.833333333, 19.833333333])
# 'rewards' variable contains expected values of the next reward for each state
rewards = np.matmul(policy, raw_rewards)
assert np.allclose(rewards, np.array([10., 2., 3.]))
gamma = 0.1
print('This are the rewards for each action:\n', rewards)
state_value_function = np.array([0 for i in range(3)])
print('Policy evaluation:')
for i in range(20):
print('V_{}={}'.format(i, state_value_function))
state_value_function = rewards + gamma * (np.matmul(policy, state_value_function))
print('\nV={}'.format(state_value_function))
solution=np.matmul(np.linalg.inv(np.eye(3)-0.1*policy), rewards)
print('Solution by inversion:\nV={}'.format(state_value_function))
import random
from collections import defaultdict
reward_counter = np.array([0., 0., 0.])
visit_counter = np.array([0., 0., 0.])
nIterations = 400
def gt(rewardlist, gamma=0.1):
    """Return the total discounted return: sum_i gamma**i * rewardlist[i].

    >>> gt([10, 2, 3], gamma=0.1)
    10.23
    """
    return sum(gamma ** step * reward
               for step, reward in enumerate(rewardlist))
for i in range(nIterations):
start_state = random.randint(0, 2)
next_state = start_state
rewardlist = []
occurence = defaultdict(list)
for i in range(250): #draw samples from the policy recursively over horizon of N=250
rewardlist.append(rewards[next_state])
occurence[next_state].append(len(rewardlist) - 1)
action = np.random.choice(np.arange(0, 3), p=policy[next_state])
next_state = action
for state in occurence:
for value in occurence[state]: #update state value function E[G_t|s]=S(s)/N(s)
rew = gt(rewardlist[value:])
reward_counter[state] += rew # S(s)
visit_counter[state] += 1 # N(s)
print("MC policy evaluation V=", reward_counter / visit_counter)
q_table = np.zeros((3, 3)) #state action value function Q-table
gamma = 0.1
alpha = 1.0
eps = 0.1
def get_eps_greedy_action(state):
    """Epsilon-greedy action selection over the 3 actions for `state`.

    With probability `eps` (module-level) explore with a uniformly random
    action; otherwise exploit the current Q-table estimate.
    """
    explore = random.uniform(0, 1) < eps
    if explore:
        return random.randint(0, 2)
    greedy_action = np.argmax(q_table[state])
    return greedy_action.item()
# Tabular Q-learning on the 3-state / 3-action toy MDP.
# Each iteration: sample a start state uniformly, pick an action
# epsilon-greedily, transition (in this MDP the action index IS the next
# state), and apply the TD(0) update with step size `alpha` and
# discount `gamma`.
for i in range(1001):
    state = random.randint(0, 2)
    action = get_eps_greedy_action(state)
    # deterministic transition: chosen action selects the next state
    next_state = action
    reward = raw_rewards[next_state]
    next_q = max(q_table[next_state]) #s.a. value evaluation at the next state
    q_table[state, action] = q_table[state, action] + alpha* (
        reward + gamma * (next_q) - q_table[state, action]) #Q-Table update
    if i % 200 == 0:
        # periodic progress printout of the full Q-table
        print("Q_{}(s,a)=".format(i),q_table)
import numpy as np
raw_rewards = np.array([1.5, -1.833333333, 19.833333333])
gamma = 0.1
state_value_function = np.zeros(3)
print('V_{} = {}'.format(0, state_value_function))
for i in range(1000):
for s in range(3):
Q_s = [raw_rewards[s_next] + gamma * state_value_function[s_next]
for s_next in range(3)]
state_value_function[s] = max(Q_s)
if i % 100 == 99:
print('V_{} = {}'.format(i + 1, state_value_function))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Policy Evaluation by Dynamic Programming
Step2: Policy Evaluation by Linear Programming
Step3: The result stays the same.
Step4: As can be seen the result is nearly the same as the state-value-function calculated above.
Step5: Value Iteration
|
7,813
|
<ASSISTANT_TASK:>
Python Code:
from pyspark.sql import SQLContext
# adding the PySpark module to SparkContext
sc.addPyFile("https://raw.githubusercontent.com/seahboonsiew/pyspark-csv/master/pyspark_csv.py")
import pyspark_csv as pycsv
# you may need to modify this line if the filename or path is different.
sqlContext = SQLContext(sc)
data = sc.textFile("/resources/sample-data.csv")
def skip_header(idx, iterator):
    """Drop the first element of partition 0 (the CSV header line).

    Intended for RDD.mapPartitionsWithIndex: only the first partition
    contains the header, so only idx == 0 is advanced past one element.
    Returns the (possibly advanced) iterator.
    """
    if idx == 0:
        # default of None avoids raising StopIteration on an empty partition
        next(iterator, None)
    return iterator
body = data.mapPartitionsWithIndex(skip_header)
header = data.first()
header_list = header.split(",")
# create Spark DataFrame using pyspark-csv
data_df = pycsv.csvToDataFrame(sqlContext, body, sep=",", columns=header_list)
data_df.cache()
data_df.printSchema()
# retrieve the first row
data_df.take(1)
# retrieve the number of rows
data_df.count()
# create a pandas dataframe from the SQL dataframe
import pprint
import pandas as pd
pandaDF = data_df.toPandas()
#Fill NA/NaN values to 0
pandaDF.fillna(0, inplace=True)
pandaDF.columns
# change index to time if its present
valueHeaderName = 'value'
timeHeaderName = 'null'
if (len(header_list) == 2):
timeHeaderName = header_list[0]
valueHeaderName = header_list[1]
else:
valueHeaderName = header_list[0]
# Drop the timestamp column as the index is replaced with timestamp now
if (len(header_list) == 2):
pandaDF.index = pandaDF[timeHeaderName]
pandaDF = pandaDF.drop([timeHeaderName], axis=1)
# Also, sort the index with the timestamp
pandaDF.sort_index(inplace=True)
pandaDF.head(n=5)
# calculate z-score and populate a new column
pandaDF['zscore'] = (pandaDF[valueHeaderName] - pandaDF[valueHeaderName].mean())/pandaDF[valueHeaderName].std(ddof=0)
pandaDF.head(n=5)
# ignore warnings if any
import warnings
warnings.filterwarnings('ignore')
# render the results as inline charts:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
'''
This function detects the spike and dip by returning a non-zero value
when the z-score is above 3 (spike) and below -3(dip). Incase if you
want to capture the smaller spikes and dips, lower the zscore value from
3 to 2 in this function.
'''
def spike(row):
    """Return the raw value for rows flagged as anomalies, else 0.

    A row is an anomaly when |zscore| >= 3 (a spike above, or a dip
    below, three standard deviations). `valueHeaderName` is the
    module-level name of the value column.
    """
    z = row['zscore']
    is_anomaly = z >= 3 or z <= -3
    return row[valueHeaderName] if is_anomaly else 0
pandaDF['spike'] = pandaDF.apply(spike, axis=1)
# select rows that are required for plotting
plotDF = pandaDF[[valueHeaderName,'spike']]
#calculate the y minimum value
y_min = (pandaDF[valueHeaderName].max() - pandaDF[valueHeaderName].min()) / 10
fig, ax = plt.subplots(num=None, figsize=(14, 6), dpi=80, facecolor='w', edgecolor='k')
ax.set_ylim(plotDF[valueHeaderName].min() - y_min, plotDF[valueHeaderName].max() + y_min)
x_filt = plotDF.index[plotDF.spike != 0]
plotDF['xyvaluexy'] = plotDF[valueHeaderName]
y_filt = plotDF.xyvaluexy[plotDF.spike != 0]
#Plot the raw data in blue colour
line1 = ax.plot(plotDF.index, plotDF[valueHeaderName], '-', color='blue', animated = True, linewidth=1)
#plot the anomalies in red circle
line2 = ax.plot(x_filt, y_filt, 'ro', color='red', linewidth=2, animated = True)
#Fill the raw area
ax.fill_between(plotDF.index, (pandaDF[valueHeaderName].min() - y_min), plotDF[valueHeaderName], interpolate=True, color='blue',alpha=0.6)
# Label the axis
ax.set_xlabel("Sequence",fontsize=20)
ax.set_ylabel(valueHeaderName,fontsize=20)
plt.tight_layout()
plt.legend()
plt.show()
# calculate the value that is corresponding to z-score 3
(pandaDF[valueHeaderName].std(ddof=0) * 3) + pandaDF[valueHeaderName].mean()
# calculate the value that is corresponding to z-score -3
(pandaDF[valueHeaderName].std(ddof=0) * -3) + pandaDF[valueHeaderName].mean()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enter the following command in the next cell to look at the first record and click Run
Step2: Enter the following command in the next cell to get the number of rows in the CSV file (DataFrame) and click Run,
Step3: Create Pandas DataFrame
Step4: Enter the following commands in the next cell to set timestamp as the index if its present and click Run,
Step5: Calculate z-score
Step6: Plot Anomalies
Step7: As shown, the red marks are the unexpected spikes and dips whose z-score value is greater than 3 or less than -3. In case you want to detect the lower spikes, modify the value to 2 or even lower and run. Similarly, if you want to detect only the higher spikes, try increasing the z-score value from 3 to 4 and beyond.
Step8: Similarly, enter the following command into the next cell to derive the dip threshold value corresponding to z-score value -3.
|
7,814
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute inverse solution and for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_labels_from_annot('sample', parc='aparc',
subjects_dir=subjects_dir)
label_colors = [label.color for label in labels]
# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
return_generator=True)
fmin = 8.
fmax = 13.
sfreq = raw.info['sfreq'] # the sampling frequency
con_methods = ['pli', 'wpli2_debiased']
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin,
fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)
# con is a 3D array, get the connectivity for the first (and only) freq. band
# for each method
con_res = dict()
for method, c in zip(con_methods, con):
con_res[method] = c[:, :, 0]
# First, we reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
# Get the y-location of the label
label_ypos = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels]
# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) / 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
plot_connectivity_circle(con_res['pli'], label_names, n_lines=300,
node_angles=node_angles, node_colors=label_colors,
title='All-to-All Connectivity left-Auditory '
'Condition (PLI)')
fig = plt.figure(num=None, figsize=(8, 4), facecolor='black')
no_names = [''] * len(label_names)
for ii, method in enumerate(con_methods):
plot_connectivity_circle(con_res[method], no_names, n_lines=300,
node_angles=node_angles, node_colors=label_colors,
title=method, padding=0, fontsize_colorbar=6,
fig=fig, subplot=(1, 2, ii + 1))
plt.show()
# fname_fig = data_path + '/MEG/sample/plot_inverse_connect.png'
# fig.savefig(fname_fig, facecolor='black')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load our data
Step2: Compute inverse solutions and their connectivity
Step3: Make a connectivity plot
Step4: Make two connectivity plots in the same figure
Step5: Save the figure (optional)
|
7,815
|
<ASSISTANT_TASK:>
Python Code:
OUTFN = "AK_NCDC_FirstOrderStations.json"
SAVEDATA = False
stationdata = []
for station in all_stations:
path = os.path.join(endpoint_stations, "GHCND:{}".format(station))
fullbase = requests.compat.urljoin(baseurl, path)
r = requests.get(
fullbase,
headers=custom_headers,
)
stationdata.append(json.loads(r.text))
if SAVEDATA:
with open(OUTFN, "w") as fh:
fh.write(json.dumps(stationdata, indent=2))
with open(OUTFN, "w") as fh:
fh.write(json.dumps(stationdata, indent=2))
[item["name"] for item in stationdata]
shortnamedic = {
'FAIRBANKS INTERNATIONAL AIRPORT, AK US': "Fairbanks",
'ANNETTE WEATHER SERVICE OFFICE AIRPORT, AK US': "Annette",
'JUNEAU AIRPORT, AK US': "Juneau",
'YAKUTAT AIRPORT, AK US': "Yakutat",
'KODIAK AIRPORT, AK US': "Kodiak",
'KING SALMON AIRPORT, AK US': "King Salmon",
'HOMER AIRPORT, AK US': "Homer",
'COLD BAY AIRPORT, AK US': "Cold Bay",
'ANCHORAGE TED STEVENS INTERNATIONAL AIRPORT, AK US': "Anchorage",
'ST PAUL ISLAND AIRPORT, AK US': "St. Paul",
'BIG DELTA AIRPORT, AK US': "Big Delta",
'GULKANA AIRPORT, AK US': "Gulkana",
'VALDEZ WEATHER SERVICE OFFICE, AK US': "Valdez",
'MCGRATH AIRPORT, AK US': "McGrath",
'TALKEETNA AIRPORT, AK US': "Talkeetna",
'BETTLES AIRPORT, AK US': "Bettles",
'BETHEL AIRPORT, AK US': "Bethel",
'KOTZEBUE RALPH WEIN MEMORIAL AIRPORT, AK ': "Kotzebue",
'NOME MUNICIPAL AIRPORT, AK US': "Nome",
'BARROW W POST W ROGERS AIRPORT, AK US': "Barrow (Utqiaġvik)"}
for item in stationdata:
item['shortname'] = shortnamedic[item['name']]
with open(OUTFN, "w") as fh:
fh.write(json.dumps(stationdata, indent=2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Optional
|
7,816
|
<ASSISTANT_TASK:>
Python Code:
import flopy
# load the model
model_ws = os.path.join("Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws)
# Because this model is old -- it predates flopy's modelgrid implementation.
# And because modelgrid has been implemented without backward compatability
# the modelgrid object is not constructed properly.
# - We will use some sneaky pyemu to get things to how they should be
import pyemu
sr = pyemu.helpers.SpatialReference.from_namfile(
os.path.join(model_ws, ml.namefile),
delc=ml.dis.delc,
delr=ml.dis.delr
)
ml.modelgrid.set_coord_info(
xoff=sr.xll,
yoff=sr.yll,
angrot=sr.rotation,
proj4=sr.proj4_str,
merge_coord_info=True,
)
# plot some model attributes
fig = plt.figure(figsize=(10,10))
ax = plt.subplot(111,aspect="equal")
ml.upw.hk.plot(axes=ax,colorbar="K m/d",alpha=0.3)
ml.wel.plot(axes=ax) # flopy possibly now only plots BCs in black
ml.riv.plot(axes=ax)
# plot obs locations
obs = pd.read_csv(os.path.join("Freyberg","misc","obs_rowcol.dat"),delim_whitespace=True)
obs_x = [ml.modelgrid.xcellcenters[r-1,c-1] for r,c in obs.loc[:,["row","col"]].values]
obs_y = [ml.modelgrid.ycellcenters[r-1,c-1] for r,c in obs.loc[:,["row","col"]].values]
ax.scatter(obs_x,obs_y,marker='.',label="obs")
#plot names on the pumping well locations
wel_data = ml.wel.stress_period_data[0]
wel_x = ml.modelgrid.xcellcenters[wel_data["i"],wel_data["j"]]
wel_y = ml.modelgrid.ycellcenters[wel_data["i"],wel_data["j"]]
for i,(x,y) in enumerate(zip(wel_x,wel_y)):
ax.text(x,y,"{0} ".format(i+1),ha="right",va="center", font=dict(size=15), color='r')
ax.set_ylabel("y")
ax.set_xlabel("x")
ax.add_patch(rect((0,0),0,0,label="well",ec="none",fc="r"))
ax.add_patch(rect((0,0),0,0,label="river",ec="none",fc="g"))
ax.legend(bbox_to_anchor=(1.5,1.0),frameon=False)
plt.savefig("domain.pdf")
import pyemu
pst = pyemu.Pst(os.path.join("Freyberg","freyberg.pst"))
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst,num_reals=200)
pe.head()
pst.parrep(pst.filename.replace(".pst",".par"))
pst.parameter_data.parval1
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst,num_reals=200)
pe.head()
pe = pyemu.ParameterEnsemble.from_uniform_draw(pst=pst,num_reals=1000)
ax = plt.subplot(111)
pe._df.loc[:,"rch_1"].plot(kind="hist",bins=20,ax=ax,alpha=0.5)
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst,num_reals=1000)
pe._df.loc[:,"rch_1"].plot(kind="hist",bins=20,ax=ax,alpha=0.5)
pe = pyemu.ParameterEnsemble.from_uniform_draw(pst=pst,num_reals=1000)
ax = plt.subplot(111)
pe._df.loc[:,"rch_1"].plot(kind="hist",bins=20,ax=ax,alpha=0.5)
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst,num_reals=1000)
pe.enforce(how="reset")
pe._df.loc[:,"rch_1"].plot(kind="hist",bins=20,ax=ax,alpha=0.5)
# get the list of forecast names from the pest++ argument in the pest control file
jco = os.path.join("Freyberg","freyberg.jcb")
sc = pyemu.Schur(jco=jco)
pe_post = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst,cov=sc.posterior_parameter, num_reals=1000)
pe_post.enforce()
ax = plt.subplot(111)
pe._df.loc[:,"rch_1"].plot(kind="hist",bins=20,ax=ax,alpha=0.5)
pe_post._df.loc[:,"rch_1"].plot(kind="hist",bins=20,ax=ax,alpha=0.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The plot shows the Freyberg (1988) model domain. The colorflood is the hydraulic conductivity ($\frac{m}{d}$). Red and green cells coorespond to well-type and river-type boundary conditions. Blue dots indicate the locations of water levels used for calibration.
Step2: Drawing from the prior
Step3: draw also accepts a num_reals argument to specify the number of draws to make
Step4: Note that these draw methods use initial parameter values in the control file (the Pst.parameter_data.parval1 attribute) the $\boldsymbol{\mu}$ (mean) prior parameter vector. To change that, we need to update the parameter values in the control file
Step5: plotting
Step6: The gaussian histo go beyond the parameter bound - bad times. Luckily, ParameterEnsemble includes an enforce method to apply parameter bounds
Step7: bayes linear monte carlo
|
7,817
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
logger = phoebe.logger()
b = phoebe.default_binary()
b.add_dataset('lc', compute_phases=phoebe.linspace(0,1,101))
b.run_compute(irrad_method='none')
times = b.get_value('times', context='model')
fluxes = b.get_value('fluxes', context='model') + np.random.normal(size=times.shape) * 0.01
sigmas = np.ones_like(times) * 0.05
b = phoebe.default_binary()
b.set_value('q', 0.8)
b.set_value('ecc', 0.1)
b.set_value('irrad_method', 'none')
b.add_dataset('orb', compute_times=np.linspace(0,4,1000), dataset='orb01', component=['primary', 'secondary'])
b.add_dataset('lc', times=times, fluxes=fluxes, sigmas=sigmas, dataset='lc01')
b.set_value(qualifier='incl', kind='orbit', value=90)
b.run_compute(model='run_with_incl_90')
b.set_value(qualifier='incl', kind='orbit', value=85)
b.run_compute(model='run_with_incl_85')
b.set_value(qualifier='incl', kind='orbit', value=80)
b.run_compute(model='run_with_incl_80')
afig, mplfig = b['orb@run_with_incl_80'].plot(time=1.0, show=True)
afig, mplfig = b['orb@run_with_incl_80'].plot(time=1.0,
highlight_marker='s',
highlight_color='g',
highlight_ms=20,
show=True)
afig, mplfig = b['orb@run_with_incl_80'].plot(time=1.0,
highlight=False,
show=True)
afig, mplfig = b['orb@run_with_incl_80'].plot(time=0.5,
uncover=True,
show=True)
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(xunit='AU', yunit='AU', show=True)
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(xlabel='X POS', ylabel='Z POS', show=True)
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(xlim=(-2,2), show=True)
afig, mplfig = b['lc01@dataset'].plot(yerror='sigmas', show=True)
afig, mplfig = b['lc01@dataset'].plot(yerror=None, show=True)
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(c='r', show=True)
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(x='times', c='vws', show=True)
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(x='times', c='vws', cmap='spring', show=True)
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(x='times', c='vws', draw_sidebars=True, show=True)
afig, mplfig = b['orb@run_with_incl_80'].plot(show=True, legend=True)
afig, mplfig = b['primary@orb@run_with_incl_80'].plot(label='primary')
afig, mplfig = b['secondary@orb@run_with_incl_80'].plot(label='secondary', legend=True, show=True)
afig, mplfig = b['orb@run_with_incl_80'].plot(show=True, legend=True, legend_kwargs={'loc': 'center', 'facecolor': 'r'})
afig, mplfig = b['orb01@primary@run_with_incl_80'].plot(linestyle=':', s=0.1, show=True)
afig, mplfig = b['orb@run_with_incl_80'].plot(time=0, projection='3d', show=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This first line is only necessary for ipython noteboooks - it allows the plots to be shown on this page instead of in interactive mode. Depending on your version of Jupyter, Python, and matplotlib - you may or may not need this line in order to see plots in the notebook.
Step2: First we're going to create some fake observations so that we can show how to plot observational data. In real life, we would use something like np.loadtxt to get arrays from a data file instead.
Step3: Now we'll create a new Bundle and attach an orbit dataset (without observations) and a light curve dataset (with our "fake" observations - see Datasets for more details)
Step4: And run several forward models. See Computing Observables for more details.
Step5: Time (highlight and uncover)
Step6: To change the style of the "highlighted" points, you can pass matplotlib recognized markers, colors, and markersizes to the highlight_marker, highlight_color, and highlight_ms keywords, respectively.
Step7: To disable highlighting, simply send highlight=False
Step8: Uncover
Step9: Units
Step10: WARNING
Step11: Axes Limits
Step12: Errorbars
Step13: To disable the errorbars, simply set yerror=None.
Step14: Colors
Step15: In addition, you can point to an array in the dataset to use as color.
Step16: Choosing colors works slightly differently for meshes (ie you can set fc for facecolor and ec for edgecolor). For more details, see the tutorial on the MESH dataset.
Step17: Adding a Colorbar
Step18: Labels and Legends
Step19: The legend labels are generated automatically, but can be overriden by passing a string to the label keyword.
Step20: To override the position or styling of the legend, you can pass valid options to legend_kwargs which will be passed on to plt.legend
Step21: Other Plotting Options
Step22: 3D Axes
|
7,818
|
<ASSISTANT_TASK:>
Python Code:
#Initializations
from IPython.core.display import HTML
HTML(open("../styles/custom.css", "r").read())
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math
pi = math.pi
from pint import UnitRegistry
ur = UnitRegistry()
# ideal gas parameters for air
gamma = 1.4
R = 8.314 * ur.J/(ur.degK*ur.mol)
MW = 28.966 * ur.grams/ur.mol
# ambient conditions
Pa = 1.0 * ur.atm
Ta = ur.Quantity(20,ur.degC).to(ur.degK)
def Qunchoke(P,Pa):
    """Mass flux (kg/m**2/s) through an ideal orifice for UNchoked flow.

    P is the upstream stagnation pressure and Pa the downstream (ambient)
    pressure; both are pint quantities. Uses the module-level ideal-gas
    constants (gamma, R, MW) and ambient temperature Ta.
    NOTE(review): assumes P >= Pa and does NOT check that the flow is
    actually unchoked -- the caller must apply the critical pressure
    ratio test first.
    """
    Pr = Pa/P
    rho = MW*P/(R*Ta)  # upstream gas density from the ideal gas law
    b = 2.0*(gamma/(gamma-1))*rho*P
    Q = np.sqrt(b*(Pr**(2.0/gamma)-Pr**((1.0+1.0/gamma))))
    return Q.to(ur.kg/ur.m**2/ur.sec)
P = np.linspace(1.0,3.0,100) * ur.atm
Qu = [Qunchoke(p,Pa) for p in P]
plt.plot(P,[q.magnitude for q in Qu])
plt.title('Unchoked Flow from an Orifice, CD = 1')
plt.ylabel(Qu[0].units)
plt.xlabel(P.units)
# ideal gas parameters for air
gamma = 1.4
R = 8.314 * ur.J/(ur.degK*ur.mol)
MW = 28.966 * ur.grams/ur.mol
# ambient conditions
Pa = 1.0 * ur.atm
Ta = ur.Quantity(20,ur.degC).to(ur.degK)
def Qchoke(P):
    """Mass flux (kg/m**2/s) through an ideal orifice for CHOKED (sonic) flow.

    P is the upstream stagnation pressure (pint quantity); the downstream
    pressure does not appear because choked flow is independent of it.
    Uses the module-level constants gamma, R, MW and ambient temperature Ta.
    """
    rho = MW*P/(R*Ta)  # upstream gas density from the ideal gas law
    a = gamma*P*rho
    Q = np.sqrt(a*(2.0/(gamma+1))**((gamma+1)/(gamma-1)))
    return Q.to(ur.kg/ur.m**2/ur.sec)
P = np.linspace(1.0,3.0,100) * ur.atm
Qc = [Qchoke(p) for p in P]
plt.plot(P,[q.magnitude for q in Qc],P,[q.magnitude for q in Qc])
plt.title('Choked Flow from an Orifice, CD = 1')
plt.ylabel(Qc[0].units)
plt.xlabel(P.units)
gamma = dict()
gamma['air (dry)'] = 1.4
gamma['argon'] = 1.67
gamma['butane'] = 1.096
gamma['carbon dioxide'] = 1.30
gamma['chlorine'] = 1.33
gamma['helium'] = 1.660
gamma['hydrogen'] = 1.41
gamma['nitrogen'] = 1.404
gamma['oxygen'] = 1.400
gamma['propane'] = 1.15
print "{0:<15s} {1:6s} {2:6s}".format("Species","CR", "1/CR")
for s in sorted(gamma.keys()):
r = ((gamma[s]+1)/2)**(gamma[s]/(gamma[s]-1))
print "{0:<15s} {1:6.4f} {2:6.4f}".format(s, r, 1.0/r)
def isChoked(Phi, Plo=101325.0, gamma=1.4):
    """True when flow from upstream pressure Phi into downstream Plo is choked.

    The flow chokes once Phi exceeds Plo times the critical pressure ratio
    ((gamma+1)/2)**(gamma/(gamma-1)). Defaults: Plo = 1 atm in Pa, air.
    """
    critical_ratio = ((gamma + 1.0) / 2.0) ** (gamma / (gamma - 1.0))
    return Phi > critical_ratio * Plo
from pint import UnitRegistry
ur = UnitRegistry()
# ideal gas parameters for air
gamma = 1.4
R = 8.314 * ur.J/(ur.degK*ur.mol)
MW = 28.966 * ur.grams/ur.mol
# ambient conditions
Pa = 1.0 * ur.atm
Ta = ur.Quantity(20,ur.degC).to(ur.degK)
def CD(P, Pa=1.0 * ur.atm):
    """Empirical discharge coefficient for an orifice vs. pressure ratio.

    Approaches 0.9 when strongly driven (Pa/P -> 0) and 0.6 when the
    pressures are equal (Pa == P).
    """
    pressure_ratio = Pa / P
    return 0.9 - 0.3 * pressure_ratio
def Qflow(T, P, Pa = 1.0 * ur.atm):
    """Mass flux (kg/m**2/s) through an ideal orifice, choked or unchoked.

    T: upstream gas temperature, P: upstream stagnation pressure,
    Pa: downstream (ambient) pressure -- all pint quantities.
    Uses the module-level ideal-gas constants gamma, R, MW.
    """
    rho = MW*P/(R*T)  # upstream density from the ideal gas law
    # critical pressure ratio: above r*Pa the orifice flow is choked
    r = ((gamma+1)/2)**(gamma/(gamma-1))
    if P <= r*Pa:
        # subsonic (unchoked) branch: flux depends on the pressure ratio
        Pr = Pa/P
        b = 2.0*(gamma/(gamma-1))*rho*P
        Q = np.sqrt(b*(Pr**(2.0/gamma)-Pr**((1.0+1.0/gamma))))
    else:
        # choked branch: flux is independent of the downstream pressure
        a = gamma*P*rho
        Q = np.sqrt(a*(2.0/(gamma+1))**((gamma+1)/(gamma-1)))
    return Q.to(ur.kg/ur.m**2/ur.sec)
def Mflow(T, P, Pa, A=1.0 * ur.m ** 2):
    """Mass flow rate through an orifice of area A: CD * A * mass flux."""
    discharge_coeff = CD(P, Pa)
    mass_flux = Qflow(T, P, Pa)
    return (discharge_coeff * A * mass_flux).to_base_units()
P = np.linspace(1.0,2.5,100) * ur.atm
M = [Mflow(Ta,p,Pa,A = 100*ur.cm**2) for p in P]
plt.plot(P,[m.magnitude for m in M])
plt.title('Mass Flow from an 100 cm**2 Orifice')
plt.ylabel(M[0].units)
plt.xlabel(P.units)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Coefficient of Discharge
Step2: Choked Flow
Step3: Critical Pressure Ratio
Step4: Procedure to Calculate the Airbag Vent Mass Flowrate
|
7,819
|
<ASSISTANT_TASK:>
Python Code:
import PIL
import PIL.Image
import scipy
import scipy.misc
ref = PIL.Image.open("sky.jpg")
ref = numpy.array(ref)
ref = scipy.misc.imresize(ref, 0.25, interp="bicubic")
target = PIL.Image.open("bird.jpg")
target = numpy.array(target)
target = scipy.misc.imresize(target, 0.25, interp="bicubic")
target_array_rgb = numpy.array(target)
target_array_rgb_resized = scipy.misc.imresize(target, 0.5, interp="bicubic")
rny = ref.shape[0]
rnx = ref.shape[1]
tny = target_array_rgb_resized.shape[0]
tnx = target_array_rgb_resized.shape[1]
oy = 10
ox = 50
#oy = 50
#ox = 250
target_array_rgb_resized_view = target_array_rgb_resized.view(dtype=[("r", numpy.uint8), ("g", numpy.uint8), ("b", numpy.uint8)]).squeeze()
zeros = numpy.zeros_like(ref)
zeros_view = zeros.view(dtype=[("r", numpy.uint8), ("g", numpy.uint8), ("b", numpy.uint8)]).squeeze()
target = zeros_view.copy()
target[oy:oy+tny, ox:ox+tnx] = target_array_rgb_resized_view
target = numpy.reshape(target.view(dtype=numpy.uint8), ref.shape)
mask = numpy.zeros((rny,rnx), dtype=numpy.uint8)
mask[oy:oy+tny, ox:ox+tnx] = 1
naive_clone = ref.copy()
naive_clone[mask == 1] = target[mask == 1]
figsize(19,4)
matplotlib.pyplot.subplot(141)
matplotlib.pyplot.imshow(ref);
matplotlib.pyplot.title("ref");
matplotlib.pyplot.subplot(142)
matplotlib.pyplot.imshow(target);
matplotlib.pyplot.title("target");
matplotlib.pyplot.subplot(143)
matplotlib.pyplot.imshow(mask);
matplotlib.pyplot.title("mask");
matplotlib.pyplot.subplot(144)
matplotlib.pyplot.imshow(naive_clone);
matplotlib.pyplot.title("naive_clone");
import skimage
import skimage.morphology
strict_interior = skimage.morphology.erosion(mask, numpy.ones((3,3), dtype=numpy.uint8))
strict_interior_indices = strict_interior.nonzero()
num_strict_interior_pixels = strict_interior_indices[0].shape[0]
border = mask - strict_interior
figsize(9,4)
matplotlib.pyplot.subplot(121);
matplotlib.pyplot.imshow(strict_interior, interpolation="nearest");
matplotlib.pyplot.title("strict_interior");
matplotlib.pyplot.subplot(122);
matplotlib.pyplot.imshow(border, interpolation="nearest");
matplotlib.pyplot.title("border");
import scipy
import scipy.sparse
import scipy.sparse.linalg
ref_greyscale = ref[:,:,0].copy()
target_greyscale = target[:,:,0].copy()
X_current = numpy.zeros_like(mask, dtype=numpy.float32)
X_next = numpy.zeros_like(mask, dtype=numpy.float32)
X_current[border == 1] = ref_greyscale[border == 1]
X_next[border == 1] = ref_greyscale[border == 1]
num_iterations = 1500
print_frequency = 25
for n in range(num_iterations):
if n % print_frequency == 0:
print n
seamless_clone_greyscale = ref_greyscale.copy()
seamless_clone_greyscale[strict_interior_indices] = X_current[strict_interior_indices]
scipy.misc.imsave("%d.png" % n, seamless_clone_greyscale / 255.0)
for i in range(num_strict_interior_pixels):
y = strict_interior_indices[0][i]
x = strict_interior_indices[1][i]
x_right = x+1
x_left = x-1
y_up = y-1
y_down = y+1
x_neighbors = []
y_neighbors = []
if x_right < rnx:
y_neighbors.append(y)
x_neighbors.append(x_right)
if y_up >= 0:
y_neighbors.append(y_up)
x_neighbors.append(x)
if x_left >= 0:
y_neighbors.append(y)
x_neighbors.append(x_left)
if y_down < rny:
y_neighbors.append(y_down)
x_neighbors.append(x)
y_neighbors = numpy.array(y_neighbors)
x_neighbors = numpy.array(x_neighbors)
strict_interior_neighbors = (strict_interior[(y_neighbors,x_neighbors)] == 1).nonzero()
border_neighbors = (strict_interior[(y_neighbors,x_neighbors)] == 0).nonzero()
num_neighbors = y_neighbors.shape[0]
sum_X_current_strict_interior_neighbors = numpy.sum(X_current[(y_neighbors[strict_interior_neighbors],x_neighbors[strict_interior_neighbors])])
sum_vq = (num_neighbors * target_greyscale[y,x]) - numpy.sum(target_greyscale[(y_neighbors, x_neighbors)])
sum_border_f = numpy.sum(ref_greyscale[(y_neighbors[border_neighbors],x_neighbors[border_neighbors])])
X_xy_next = (sum_X_current_strict_interior_neighbors + sum_border_f + sum_vq) / num_neighbors
X_next[y,x] = numpy.clip(X_xy_next, 0.0, 255.0)
#if i == 0:
# print "-"
# print ref_greyscale[(y_neighbors[border_neighbors],x_neighbors[border_neighbors])]
# print X_current[(y_neighbors,x_neighbors)]
# print "-"
# print sum_X_current_strict_interior_neighbors
# print sum_vq
# print sum_border_f
# print X_current[y,x]
# print X_xy_next
# print
#print "----"
X_current, X_next = X_next, X_current
seamless_clone_greyscale = ref_greyscale.copy()
seamless_clone_greyscale[strict_interior_indices] = X_current[strict_interior_indices]
figsize(15,15)
matplotlib.pyplot.imshow(seamless_clone_greyscale, interpolation="nearest", cmap="gray");
matplotlib.pyplot.title("seamless_clone_greyscale");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: compute strict interior and border regions
Step2: compute seamless clone (red)
|
7,820
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from math import pi, sin, cos
import numpy as np
import openmc
fuel = openmc.Material(name='fuel')
fuel.add_element('U', 1.0)
fuel.add_element('O', 2.0)
fuel.set_density('g/cm3', 10.0)
clad = openmc.Material(name='zircaloy')
clad.add_element('Zr', 1.0)
clad.set_density('g/cm3', 6.0)
heavy_water = openmc.Material(name='heavy water')
heavy_water.add_nuclide('H2', 2.0)
heavy_water.add_nuclide('O16', 1.0)
heavy_water.add_s_alpha_beta('c_D_in_D2O')
heavy_water.set_density('g/cm3', 1.1)
# Outer radius of fuel and clad
r_fuel = 0.6122
r_clad = 0.6540
# Pressure tube and calendria radii
pressure_tube_ir = 5.16890
pressure_tube_or = 5.60320
calendria_ir = 6.44780
calendria_or = 6.58750
# Radius to center of each ring of fuel pins
ring_radii = np.array([0.0, 1.4885, 2.8755, 4.3305])
# These are the surfaces that will divide each of the rings
radial_surf = [openmc.ZCylinder(r=r) for r in
(ring_radii[:-1] + ring_radii[1:])/2]
water_cells = []
for i in range(ring_radii.size):
# Create annular region
if i == 0:
water_region = -radial_surf[i]
elif i == ring_radii.size - 1:
water_region = +radial_surf[i-1]
else:
water_region = +radial_surf[i-1] & -radial_surf[i]
water_cells.append(openmc.Cell(fill=heavy_water, region=water_region))
plot_args = {'width': (2*calendria_or, 2*calendria_or)}
bundle_universe = openmc.Universe(cells=water_cells)
bundle_universe.plot(**plot_args)
surf_fuel = openmc.ZCylinder(r=r_fuel)
fuel_cell = openmc.Cell(fill=fuel, region=-surf_fuel)
clad_cell = openmc.Cell(fill=clad, region=+surf_fuel)
pin_universe = openmc.Universe(cells=(fuel_cell, clad_cell))
pin_universe.plot(**plot_args)
num_pins = [1, 6, 12, 18]
angles = [0, 0, 15, 0]
for i, (r, n, a) in enumerate(zip(ring_radii, num_pins, angles)):
for j in range(n):
# Determine location of center of pin
theta = (a + j/n*360.) * pi/180.
x = r*cos(theta)
y = r*sin(theta)
pin_boundary = openmc.ZCylinder(x0=x, y0=y, r=r_clad)
water_cells[i].region &= +pin_boundary
# Create each fuel pin -- note that we explicitly assign an ID so
# that we can identify the pin later when looking at tallies
pin = openmc.Cell(fill=pin_universe, region=-pin_boundary)
pin.translation = (x, y, 0)
pin.id = (i + 1)*100 + j
bundle_universe.add_cell(pin)
bundle_universe.plot(**plot_args)
pt_inner = openmc.ZCylinder(r=pressure_tube_ir)
pt_outer = openmc.ZCylinder(r=pressure_tube_or)
calendria_inner = openmc.ZCylinder(r=calendria_ir)
calendria_outer = openmc.ZCylinder(r=calendria_or, boundary_type='vacuum')
bundle = openmc.Cell(fill=bundle_universe, region=-pt_inner)
pressure_tube = openmc.Cell(fill=clad, region=+pt_inner & -pt_outer)
v1 = openmc.Cell(region=+pt_outer & -calendria_inner)
calendria = openmc.Cell(fill=clad, region=+calendria_inner & -calendria_outer)
root_universe = openmc.Universe(cells=[bundle, pressure_tube, v1, calendria])
geom = openmc.Geometry(root_universe)
geom.export_to_xml()
mats = openmc.Materials(geom.get_all_materials().values())
mats.export_to_xml()
p = openmc.Plot.from_geometry(geom)
p.color_by = 'material'
p.colors = {
fuel: 'black',
clad: 'silver',
heavy_water: 'blue'
}
p.to_ipython_image()
settings = openmc.Settings()
settings.particles = 1000
settings.batches = 20
settings.inactive = 10
settings.source = openmc.Source(space=openmc.stats.Point())
settings.export_to_xml()
fuel_tally = openmc.Tally()
fuel_tally.filters = [openmc.DistribcellFilter(fuel_cell)]
fuel_tally.scores = ['flux']
tallies = openmc.Tallies([fuel_tally])
tallies.export_to_xml()
openmc.run(output=False)
sp = openmc.StatePoint('statepoint.{}.h5'.format(settings.batches))
t = sp.get_tally()
t.get_pandas_dataframe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's begin by creating the materials that will be used in our model.
Step2: With our materials created, we'll now define key dimensions in our model. These dimensions are taken from the example in section 11.1.3 of the Serpent manual.
Step3: To begin creating the bundle, we'll first create annular regions completely filled with heavy water and add in the fuel pins later. The radii that we've specified above correspond to the center of each ring. We actually need to create cylindrical surfaces at radii that are half-way between the centers.
Step4: Let's see what our geometry looks like so far. In order to plot the geometry, we create a universe that contains the annular water cells and then use the Universe.plot() method. While we're at it, we'll set some keyword arguments that can be reused for later plots.
Step5: Now we need to create a universe that contains a fuel pin. Note that we don't actually need to put water outside of the cladding in this universe because it will be truncated by a higher universe.
Step6: The code below works through each ring to create a cell containing the fuel pin universe. As each fuel pin is created, we modify the region of the water cell to include everything outside the fuel pin.
Step7: Looking pretty good! Finally, we create cells for the pressure tube and calendria and then put our bundle in the middle of the pressure tube.
Step8: Let's look at the final product. We'll export our geometry and materials and then use plot_inline() to get a nice-looking plot.
Step9: Interpreting Results
Step10: The return code of 0 indicates that OpenMC ran successfully. Now let's load the statepoint into a openmc.StatePoint object and use the Tally.get_pandas_dataframe(...) method to see our results.
|
7,821
|
<ASSISTANT_TASK:>
Python Code:
import sympy
sympy.init_printing()
Theta = sympy.Matrix(sympy.symbols(
'theta_0:3_0:4')).reshape(3,4)
def Y(n):
return sympy.Matrix(sympy.symbols(
'G_x:z_0:{:d}'.format(n+1))).T.reshape(3, n+1)
def C(n):
return sympy.ones(n+1, 1)
def T(n):
return sympy.Matrix(sympy.symbols('T_0:{:d}'.format(n+1)))
def T2(n):
return T(n).multiply_elementwise(T(n))
def T3(n):
return T2(n).multiply_elementwise(T(n))
def X(n):
return C(n).row_join(T(n)).row_join(T2(n)).row_join(T3(n)).T
X(0)*X(0).T
X(1)*X(1).T
dX = X(1)*X(1).T - X(0)*X(0).T
dX
Y(0)*X(0).T
Y(1)*X(1).T
dYXT = Y(1)*X(1).T - Y(0)*X(0).T
dYXT
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This let's us derive a recurisve form.
|
7,822
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
%matplotlib inline
# load the raw data
df = pd.read_csv('../Data/shaneiphone_exp2.csv')
# plot gravity signal
df[['motionGravityX', 'motionGravityY', 'motionGravityZ']].plot()
# plot gyroscope signal [indices 7000 to 10000 include a series of sharp turns in the Censio parking lot]
df[7000:10000][['gyroRotationX', 'gyroRotationY', 'gyroRotationZ']].plot()
from quatrotate import qv_mult # this routine implements rotation via quaternion multiplication
import numpy as np
def getrot(quatern, vector):
rotatedvector = []
for i in range(vector.shape[0]):
rotatedvector.append(qv_mult(tuple(quatern[i, :]),
tuple(vector[i, :])))
return np.array(rotatedvector)
def rotate(df):
Generate rdf, a rotated version of df where the z-axis is aligned
with gravity.
varlist = ['accelerometerAcceleration', 'motionUserAcceleration',
'motionGravity', 'motionMagneticField', 'gyroRotation']
quaternion = df[['motionQuaternionW', 'motionQuaternionX',
'motionQuaternionY', 'motionQuaternionZ']].values
for ivar in varlist:
print("..." + ivar)
xyzlist = [ivar + 'X', ivar + 'Y', ivar + 'Z']
xyz = df[xyzlist].values
xyz_rotated = getrot(quaternion, xyz)
df[ivar + 'X'] = xyz_rotated[:, 0]
df[ivar + 'Y'] = xyz_rotated[:, 1]
df[ivar + 'Z'] = xyz_rotated[:, 2]
# rotate does an in-place rotation
rdf = df.copy()
rotate(rdf)
rdf[['motionGravityX', 'motionGravityY', 'motionGravityZ']].plot()
# indices 7000 to 10000 include a series of sharp turns in the Censio parking lot
rdf[7000:10000][['gyroRotationX', 'gyroRotationY', 'gyroRotationZ']].plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The XYZ axes from SensorLog are in the frame of the iPhone. On the way to Censio, my phone was placed flat on the driver seat. On the return trip Nick had my phone in his pocket at a slight angle. This is reflected in the step function like behavior of the gravityXYZ time series.
Step2: This step function like behavior is present in the accelerometer and gyroscope XYZ signals. My phone was on the driver side for the trip out and on the passenger side for the return trip. Machine learning classifiers will pick up on this signal and classify driver vs. passenger side simply based on phone orientation. This will generalize poorly, since there is no reason to expect that phones placed on a particular side of a vehicle will have a given orientation. Therefore, the reference frame needs to be adjusted to match that of the vehicle. This can be done using quaternions.
Step4: If the reference frame was that of the vehicle instead of the phone, only gyroRotationZ should trace turns.
Step5: Now let's investigate the rotated XYZ signals
Step6: Nice, GravityZ = -1 and GravityX = GravityY = 0
|
7,823
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
# TODO: Implement Function
vocab_to_int = {w:i for i, w in enumerate(set(text))}
int_to_vocab = {i:w for i, w in enumerate(set(text))}
return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
return {
'.': '||Period||',
',': '||Comma||',
'"': '||Quotation_Mark||',
';': '||Semicolon||',
'!': '||Exclamation_mark||',
'?': '||Question_mark||',
'(': '||Left_Parentheses||',
')': '||Right_Parentheses||',
'--': '||Dash||',
"\n": '||Return||'
}
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
# TODO: Implement Function
inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input')
targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='targets')
learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')
return inputs, targets, learning_rate
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size, keep_prob=0.8, layers=3):
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
multi = tf.contrib.rnn.MultiRNNCell([cell] * layers)
init_state = multi.zero_state(batch_size, tf.float32)
init_state = tf.identity(init_state, 'initial_state')
return multi, init_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
embeddings = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embeddings, input_data)
return embed
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, 'final_state')
return outputs, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size):
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:return: Tuple (Logits, FinalState)
embed = get_embed(input_data, vocab_size, rnn_size)
outputs, final_state = build_rnn(cell, embed)
logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
return logits, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
# TODO: Implement Function
n_batches = len(int_text) // (batch_size * seq_length)
result = []
for i in range(n_batches):
inputs = []
targets = []
for j in range(batch_size):
idx = i * seq_length + j * seq_length
inputs.append(int_text[idx:idx + seq_length])
targets.append(int_text[idx + 1:idx + seq_length + 1])
result.append([inputs, targets])
return np.array(result)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 64
# RNN Size
rnn_size = 256
# Sequence Length
seq_length = 25
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
inputs = loaded_graph.get_tensor_by_name('input:0')
init_state = loaded_graph.get_tensor_by_name('initial_state:0')
final_state = loaded_graph.get_tensor_by_name('final_state:0')
probs = loaded_graph.get_tensor_by_name('probs:0')
return inputs, init_state, final_state, probs
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
# TODO: Implement Function
return int_to_vocab[np.argmax(probabilities)]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
7,824
|
<ASSISTANT_TASK:>
Python Code:
# import the required libraries
import numpy as np
import time
import random
import cPickle
import codecs
import collections
import os
import math
import json
import tensorflow as tf
from six.moves import xrange
# libraries required for visualisation:
from IPython.display import SVG, display
import PIL
from PIL import Image
import matplotlib.pyplot as plt
# set numpy output to something sensible
np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)
!pip install -qU svgwrite
import svgwrite # conda install -c omnia svgwrite=1.1.6
tf.logging.info("TensorFlow Version: %s", tf.__version__)
!pip install -q magenta
# import our command line tools
from magenta.models.sketch_rnn.sketch_rnn_train import *
from magenta.models.sketch_rnn.model import *
from magenta.models.sketch_rnn.utils import *
from magenta.models.sketch_rnn.rnn import *
# little function that displays vector images and saves them to .svg
def draw_strokes(data, factor=0.2, svg_filename = '/tmp/sketch_rnn/svg/sample.svg'):
tf.gfile.MakeDirs(os.path.dirname(svg_filename))
min_x, max_x, min_y, max_y = get_bounds(data, factor)
dims = (50 + max_x - min_x, 50 + max_y - min_y)
dwg = svgwrite.Drawing(svg_filename, size=dims)
dwg.add(dwg.rect(insert=(0, 0), size=dims,fill='white'))
lift_pen = 1
abs_x = 25 - min_x
abs_y = 25 - min_y
p = "M%s,%s " % (abs_x, abs_y)
command = "m"
for i in xrange(len(data)):
if (lift_pen == 1):
command = "m"
elif (command != "l"):
command = "l"
else:
command = ""
x = float(data[i,0])/factor
y = float(data[i,1])/factor
lift_pen = data[i, 2]
p += command+str(x)+","+str(y)+" "
the_color = "black"
stroke_width = 1
dwg.add(dwg.path(p).stroke(the_color,stroke_width).fill("none"))
dwg.save()
display(SVG(dwg.tostring()))
# generate a 2D grid of many vector drawings
def make_grid_svg(s_list, grid_space=10.0, grid_space_x=16.0):
def get_start_and_end(x):
x = np.array(x)
x = x[:, 0:2]
x_start = x[0]
x_end = x.sum(axis=0)
x = x.cumsum(axis=0)
x_max = x.max(axis=0)
x_min = x.min(axis=0)
center_loc = (x_max+x_min)*0.5
return x_start-center_loc, x_end
x_pos = 0.0
y_pos = 0.0
result = [[x_pos, y_pos, 1]]
for sample in s_list:
s = sample[0]
grid_loc = sample[1]
grid_y = grid_loc[0]*grid_space+grid_space*0.5
grid_x = grid_loc[1]*grid_space_x+grid_space_x*0.5
start_loc, delta_pos = get_start_and_end(s)
loc_x = start_loc[0]
loc_y = start_loc[1]
new_x_pos = grid_x+loc_x
new_y_pos = grid_y+loc_y
result.append([new_x_pos-x_pos, new_y_pos-y_pos, 0])
result += s.tolist()
result[-1][2] = 1
x_pos = new_x_pos+delta_pos[0]
y_pos = new_y_pos+delta_pos[1]
return np.array(result)
data_dir = 'http://github.com/hardmaru/sketch-rnn-datasets/raw/master/aaron_sheep/'
models_root_dir = '/tmp/sketch_rnn/models'
model_dir = '/tmp/sketch_rnn/models/aaron_sheep/layer_norm'
download_pretrained_models(models_root_dir=models_root_dir)
def load_env_compatible(data_dir, model_dir):
Loads environment for inference mode, used in jupyter notebook.
# modified https://github.com/tensorflow/magenta/blob/master/magenta/models/sketch_rnn/sketch_rnn_train.py
# to work with depreciated tf.HParams functionality
model_params = sketch_rnn_model.get_default_hparams()
with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:
data = json.load(f)
fix_list = ['conditional', 'is_training', 'use_input_dropout', 'use_output_dropout', 'use_recurrent_dropout']
for fix in fix_list:
data[fix] = (data[fix] == 1)
model_params.parse_json(json.dumps(data))
return load_dataset(data_dir, model_params, inference_mode=True)
def load_model_compatible(model_dir):
Loads model for inference mode, used in jupyter notebook.
# modified https://github.com/tensorflow/magenta/blob/master/magenta/models/sketch_rnn/sketch_rnn_train.py
# to work with depreciated tf.HParams functionality
model_params = sketch_rnn_model.get_default_hparams()
with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:
data = json.load(f)
fix_list = ['conditional', 'is_training', 'use_input_dropout', 'use_output_dropout', 'use_recurrent_dropout']
for fix in fix_list:
data[fix] = (data[fix] == 1)
model_params.parse_json(json.dumps(data))
model_params.batch_size = 1 # only sample one at a time
eval_model_params = sketch_rnn_model.copy_hparams(model_params)
eval_model_params.use_input_dropout = 0
eval_model_params.use_recurrent_dropout = 0
eval_model_params.use_output_dropout = 0
eval_model_params.is_training = 0
sample_model_params = sketch_rnn_model.copy_hparams(eval_model_params)
sample_model_params.max_seq_len = 1 # sample one point at a time
return [model_params, eval_model_params, sample_model_params]
[train_set, valid_set, test_set, hps_model, eval_hps_model, sample_hps_model] = load_env_compatible(data_dir, model_dir)
# construct the sketch-rnn model here:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# loads the weights from checkpoint into our model
load_checkpoint(sess, model_dir)
def encode(input_strokes):
strokes = to_big_strokes(input_strokes).tolist()
strokes.insert(0, [0, 0, 1, 0, 0])
seq_len = [len(input_strokes)]
draw_strokes(to_normal_strokes(np.array(strokes)))
return sess.run(eval_model.batch_z, feed_dict={eval_model.input_data: [strokes], eval_model.sequence_lengths: seq_len})[0]
def decode(z_input=None, draw_mode=True, temperature=0.1, factor=0.2):
z = None
if z_input is not None:
z = [z_input]
sample_strokes, m = sample(sess, sample_model, seq_len=eval_model.hps.max_seq_len, temperature=temperature, z=z)
strokes = to_normal_strokes(sample_strokes)
if draw_mode:
draw_strokes(strokes, factor)
return strokes
# get a sample drawing from the test set, and render it to .svg
stroke = test_set.random_sample()
draw_strokes(stroke)
z = encode(stroke)
_ = decode(z, temperature=0.8) # convert z back to drawing at temperature of 0.8
stroke_list = []
for i in range(10):
stroke_list.append([decode(z, draw_mode=False, temperature=0.1*i+0.1), [0, i]])
stroke_grid = make_grid_svg(stroke_list)
draw_strokes(stroke_grid)
# get a sample drawing from the test set, and render it to .svg
z0 = z
_ = decode(z0)
stroke = test_set.random_sample()
z1 = encode(stroke)
_ = decode(z1)
z_list = [] # interpolate spherically between z0 and z1
N = 10
for t in np.linspace(0, 1, N):
z_list.append(slerp(z0, z1, t))
# for every latent vector in z_list, sample a vector image
reconstructions = []
for i in range(N):
reconstructions.append([decode(z_list[i], draw_mode=False), [0, i]])
stroke_grid = make_grid_svg(reconstructions)
draw_strokes(stroke_grid)
model_dir = '/tmp/sketch_rnn/models/flamingo/lstm_uncond'
[hps_model, eval_hps_model, sample_hps_model] = load_model_compatible(model_dir)
# construct the sketch-rnn model here:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# loads the weights from checkpoint into our model
load_checkpoint(sess, model_dir)
# randomly unconditionally generate 10 examples
N = 10
reconstructions = []
for i in range(N):
reconstructions.append([decode(temperature=0.5, draw_mode=False), [0, i]])
stroke_grid = make_grid_svg(reconstructions)
draw_strokes(stroke_grid)
model_dir = '/tmp/sketch_rnn/models/owl/lstm'
[hps_model, eval_hps_model, sample_hps_model] = load_model_compatible(model_dir)
# construct the sketch-rnn model here:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# loads the weights from checkpoint into our model
load_checkpoint(sess, model_dir)
z_0 = np.random.randn(eval_model.hps.z_size)
_ = decode(z_0)
z_1 = np.random.randn(eval_model.hps.z_size)
_ = decode(z_1)
z_list = [] # interpolate spherically between z_0 and z_1
N = 10
for t in np.linspace(0, 1, N):
z_list.append(slerp(z_0, z_1, t))
# for every latent vector in z_list, sample a vector image
reconstructions = []
for i in range(N):
reconstructions.append([decode(z_list[i], draw_mode=False, temperature=0.1), [0, i]])
stroke_grid = make_grid_svg(reconstructions)
draw_strokes(stroke_grid)
model_dir = '/tmp/sketch_rnn/models/catbus/lstm'
[hps_model, eval_hps_model, sample_hps_model] = load_model_compatible(model_dir)
# construct the sketch-rnn model here:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# loads the weights from checkpoint into our model
load_checkpoint(sess, model_dir)
z_1 = np.random.randn(eval_model.hps.z_size)
_ = decode(z_1)
z_0 = np.random.randn(eval_model.hps.z_size)
_ = decode(z_0)
z_list = [] # interpolate spherically between z_1 and z_0
N = 10
for t in np.linspace(0, 1, N):
z_list.append(slerp(z_1, z_0, t))
# for every latent vector in z_list, sample a vector image
reconstructions = []
for i in range(N):
reconstructions.append([decode(z_list[i], draw_mode=False, temperature=0.15), [0, i]])
stroke_grid = make_grid_svg(reconstructions)
draw_strokes(stroke_grid)
model_dir = '/tmp/sketch_rnn/models/elephantpig/lstm'
[hps_model, eval_hps_model, sample_hps_model] = load_model_compatible(model_dir)
# construct the sketch-rnn model here:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# loads the weights from checkpoint into our model
load_checkpoint(sess, model_dir)
z_0 = np.random.randn(eval_model.hps.z_size)
_ = decode(z_0)
z_1 = np.random.randn(eval_model.hps.z_size)
_ = decode(z_1)
z_list = [] # interpolate spherically between z_1 and z_0
N = 10
for t in np.linspace(0, 1, N):
z_list.append(slerp(z_0, z_1, t))
# for every latent vector in z_list, sample a vector image
reconstructions = []
for i in range(N):
reconstructions.append([decode(z_list[i], draw_mode=False, temperature=0.15), [0, i]])
stroke_grid = make_grid_svg(reconstructions, grid_space_x=25.0)
draw_strokes(stroke_grid, factor=0.3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: define the path of the model you want to load, and also the path of the dataset
Step4: We define two convenience functions to encode a stroke into a latent vector, and decode from latent vector to stroke.
Step5: Let's try to encode the sample stroke into latent vector $z$
Step6: Create generated grid at various temperatures from 0.1 to 1.0
Step7: Latent Space Interpolation Example between $z_0$ and $z_1$
Step8: Now we interpolate between sheep $z_0$ and sheep $z_1$
Step9: Let's load the Flamingo Model, and try Unconditional (Decoder-Only) Generation
Step10: Let's load the owl model, and generate two sketches using two random IID gaussian latent vectors
Step11: Let's interpolate between the two owls $z_0$ and $z_1$
Step12: Let's load the model trained on both cats and buses! catbus!
Step13: Let's interpolate between a cat and a bus!!!
Step14: Why stop here? Let's load the model trained on both elephants and pigs!!!
Step15: Tribute to an episode of South Park
|
7,825
|
<ASSISTANT_TASK:>
Python Code:
#%matplotlib inline
# NOTE: Python 2 code (print statement on the error line below).
# Linear least squares via the normal equations: x = (A^T A)^{-1} A^T b.
import matplotlib.pyplot as plt
plt.scatter((1,2,2.5), (2,1,2)); plt.xlim((0,3)); plt.ylim((0,3));
import numpy as np
A = np.array(((1,1),(2,1),(2.5,1))); b = np.array((2,1,2)) # Create A and b
x = np.dot(np.dot(np.linalg.inv(np.dot(A.T, A)), A.T), b) # Project b onto Col(A)
xvals = np.linspace(0,3,100) # Create a set of x values
yvals = x[0]*xvals + x[1] # All y values for the equation of the line
plt.scatter((1,2,2.5), (2,1,2)); plt.plot(xvals,yvals); plt.xlim((0,3)); plt.ylim((0,3));
# Same machinery fits a parabola: the model is still linear in its parameters.
A = np.array(((1,1,1),(4,2,1),(6.25,2.5,1))) # The matrix for our new 3x3 system of equations.
x = np.dot(np.dot(np.linalg.inv(np.dot(A.T, A)), A.T), b) # Project b onto Col(A)
error = np.linalg.norm( np.dot(A,x) - b )
print "Error = ", error
yvals = x[0]*xvals*xvals + x[1]*xvals + x[2] # All y values for the equation of the line
plt.scatter((1,2,2.5), (2,1,2)); plt.plot(xvals,yvals); plt.xlim((0,3)); plt.ylim((0,3));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Another way to express this problem is to say, I would like to find the equation of a line that satisfies all of the above points. Take the following general equation of a line...
Step2: NOW, let's assume that instead of fitting a line we wanted to fit a parabola. This is still a linear least squares problem. That's because linear least squares only requires that the function being fit is linear in its parameters. We will look more at what that means below. Let's take a general quadratic equation.
Step3: Now we look at the resulting parabola and see that it passes through all 3 points.
|
7,826
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import networkx as nx
from matplotlib import pyplot as plt
%matplotlib inline
from scipy.io import loadmat
import warnings
warnings.filterwarnings( 'ignore' )
def mcl_iter( A, p = 2, alpha = 2, theta = 1e-8, rel_eps = 1e-4, niter = 10000 ) :
    """Markov Cluster (MCL) iteration on the adjacency matrix A.

    Alternates expansion (p-th matrix power), pruning of transitions below
    theta, inflation (elementwise power alpha) and row re-normalisation.
    Returns (M, (status, step)): status 0 on convergence, -1 if the iteration
    budget was exhausted, -2 if a row sum became NaN.
    """
    # Row-normalise A into a stochastic transition kernel:
    # entry (i, j) is the probability of stepping from i to j.
    kernel = np.multiply( 1.0 / A.sum( axis = 1, dtype = np.float64 ).reshape(-1, 1), A )
    outcome, step = -1, 0
    while step < niter :
        previous = kernel.copy( )
        # Expansion: probabilities of reaching j from i in p hops.
        kernel = np.linalg.matrix_power( kernel, p )
        # Pruning: drop almost-surely-unused paths.
        kernel[ np.abs( kernel ) < theta ] = 0
        # Inflation: sharpen the per-row distributions.
        kernel = np.power( kernel, alpha )
        # Re-normalise back into a stochastic kernel; abort on NaN rows.
        row_sums = kernel.sum( axis = 1, dtype = np.float64 )
        if np.any( np.isnan( row_sums ) ) :
            outcome = -2
            break
        kernel = np.multiply( 1.0 / row_sums.reshape(-1, 1), kernel )
        # Converged when the L1 relative change of the kernel is tiny.
        if np.sum( np.abs( kernel - previous ) / ( np.abs( previous ) + rel_eps ) ) < rel_eps :
            outcome = 0
            break
        step += 1
    return ( kernel, (outcome, step) )
def extract_communities( M, lengths = True ) :
    """Read communities off a converged MCL matrix (one community per column).

    With lengths=True returns the array of community sizes; otherwise a list
    of arrays of member (row) indices. Returns an empty list when M contains
    NaNs or holds more nonzero entries than columns (not a clean partition).
    """
    communities = list( )
    if np.any( np.isnan( M ) ) :
        return communities
    # Row/column indices of every nonzero entry, rows grouped by column.
    rows, cols = np.where( M )
    rows = rows[ np.argsort( cols ) ]
    _, sizes = np.unique( cols, return_counts = True )
    # A valid attractor matrix has at most one nonzero per column.
    if np.sum( sizes ) > M.shape[ 1 ] :
        return communities
    if lengths :
        return sizes
    # Columns are ordered, so consecutive slices of `rows` are the members.
    offset = 0
    for size in sizes :
        communities.append( rows[ offset:offset + size ] )
        offset += size
    return communities
def mcl_test( A, a_grid, p_grid, theta = 1e-8 ) :
    """Run MCL over the (alpha, p) parameter grid on adjacency matrix A.

    Returns (NI, NC): per grid cell the number of iterations to convergence
    and the number of detected communities, each shaped (len(p_grid), len(a_grid)).
    """
    ## Run the grid test
    res = [ mcl_iter( A, p = p, alpha = a, theta = theta )
                for p in p_grid for a in a_grid ]
    ## Get the number of communities.
    ## FIX: `np.int` was removed from NumPy (>=1.24); the builtin `int` is the
    ##  exact alias it stood for, so behaviour is unchanged.
    NC = np.array( [ len( extract_communities( C ) )
            for C, (s, i) in res ], dtype = int ).reshape( len( p_grid ), -1 )
    ## Extract the number of iterations
    NI = np.array( [ i for C, (s, i) in res ], dtype = int ).reshape( len( p_grid ), -1 )
    return NI, NC
## Print a (p x alpha) results table: header row of alpha values, one row per
##  p value, blank cells where the count is zero. Stops at the first all-zero
##  row. NOTE: Python 2 code (print statements, xrange).
def show_table( S, r, c ):
    # Header: alpha values across the top.
    print " p\\a\t", "\t".join( len( c )*[ "%#4.3g" ] ) % tuple( c )
    for j in xrange( S.shape[0] ) :
        # Rows below the first all-zero one carry no information.
        if np.all( S[j,:] == 0 ) :
            break
        row = [ "%#2d"%(v) if v > 0 else "  " for v in S[j,:] ]
        print "%#6d\t"%( r[ j ] ), "\t".join( len( c )*[ "%s" ] ) % tuple( row )
## Produce a visually appealing picture of the adjacency
##  matrix and community detection results.
def show_network( A, C, title = "" ) :
    # Adjacency structure in gray, detected community structure overlaid in
    # magenta (drawn second so it sits on top).
    plt.spy( A, color = "gray", markersize = .5 )
    plt.spy( C, color = "magenta", markersize = 5 )
    if title : plt.title( title )
# Parameter grids: 10 alpha values log-spaced from ~1.01 to 10, p from 2 to 20.
# NOTE: np.float / np.int are deprecated aliases removed in NumPy >= 1.24.
alpha_grid, p_grid = np.logspace( 4.5e-3, 1, num = 10, dtype = np.float ), np.arange( 2, 21, dtype = np.int )
files = [ 'network1.mat', 'network2.mat', 'network3.mat' ]
titles = [ "Noiseless network: "+files[0], "Noisier network: "+files[1], "Noisiest network: "+files[2] ]
matrices = [ np.array( loadmat( './data/hw4/'+f )[ 'A' ], np.float ) for f in files ]
# Baseline: default MCL parameters on each noise level, side by side.
plt.figure( figsize=(18,6) )
plt.subplot( 131 )
C0, _ = mcl_iter( matrices[ 0 ] )
show_network( matrices[ 0 ], C0, titles[0] )
plt.subplot( 132 )
C1, _ = mcl_iter( matrices[ 1 ] )
show_network( matrices[ 1 ], C1, titles[1] )
plt.subplot( 133 )
C2, _ = mcl_iter( matrices[ 2 ] )
show_network( matrices[ 2 ], C2, titles[2] )
plt.show( )
# Grid sweep at three pruning thresholds theta. NOTE: Python 2 prints below.
theta = 1.0E-8
I11, S11 = mcl_test( matrices[ 0 ], alpha_grid, p_grid, theta = theta )
I12, S12 = mcl_test( matrices[ 1 ], alpha_grid, p_grid, theta = theta )
I13, S13 = mcl_test( matrices[ 2 ], alpha_grid, p_grid, theta = theta )
print files[ 0 ]
show_table( S11, p_grid, alpha_grid )
print files[ 1 ]
show_table( S12, p_grid, alpha_grid )
print files[ 2 ]
show_table( S13, p_grid, alpha_grid )
theta = 1.0E-4
I21, S21 = mcl_test( matrices[ 0 ], alpha_grid, p_grid, theta = theta )
I22, S22 = mcl_test( matrices[ 1 ], alpha_grid, p_grid, theta = theta )
I23, S23 = mcl_test( matrices[ 2 ], alpha_grid, p_grid, theta = theta )
print files[ 0 ]
show_table( S21, p_grid, alpha_grid )
print files[ 1 ]
show_table( S22, p_grid, alpha_grid )
print files[ 2 ]
show_table( S23, p_grid, alpha_grid )
theta = 0.01
I31, S31 = mcl_test( matrices[ 0 ], alpha_grid, p_grid, theta = theta )
I32, S32 = mcl_test( matrices[ 1 ], alpha_grid, p_grid, theta = theta )
I33, S33 = mcl_test( matrices[ 2 ], alpha_grid, p_grid, theta = theta )
print files[ 0 ]
show_table( S31, p_grid, alpha_grid )
print files[ 1 ]
show_table( S32, p_grid, alpha_grid )
print files[ 2 ]
show_table( S33, p_grid, alpha_grid )
def cases( n, pairs, theta = 1.0E-8 ) :
    """Plot MCL results for network `n` at the given (alpha-index, p-index) pairs.

    Each pair indexes into the module-level alpha_grid / p_grid; one subplot
    per pair. Relies on globals: matrices, titles, alpha_grid, p_grid.
    """
    plt.figure( figsize=(18,6) )
    for i, (a, p) in enumerate( pairs, 1 ) :
        ## FIX: was plt.subplot(int("1" + str(len(pairs)) + str(i))), which
        ##  silently breaks for 10 or more panels; the 3-argument form is
        ##  identical for the single-digit case and correct in general.
        plt.subplot( 1, len( pairs ), i )
        C, _ = mcl_iter( matrices[n], alpha = alpha_grid[a], p = p_grid[p], theta = theta )
        show_network( matrices[n], C, titles[n] + ": (a=%.2f, p=%d)" % ( alpha_grid[a], p_grid[p] ) )
    plt.show( )
## Representative (alpha-index, p-index) settings for each noise level.
## The first network
cases( 0, [ (1, 0), (9, 1), (9, 15) ] )
## Second network
cases( 1, [ (8, 1), (9, 1), (9, 2) ] )
## Third network
cases( 2, [ (6, 1), (9, 1), (8, 2) ] )
## Compute the scout probabilities: an m-scout probability is the
##  chance of the first ever visit to j of a random walk from i
##  taking place no later than m-th step.
def scout( P, n = 3, cumulative = True ) :
    """First-passage probabilities of the random walk with kernel P.

    With cumulative=True returns the probability that j is first visited from
    i within n steps; otherwise the probability that the first visit happens
    exactly at step n.
    """
    pi = P.copy() ; Sc = pi.copy() ; i = 1
    while i < n :
        ## Get the probability of a j-j loop with i hops
        pi_jj = pi.diagonal( ).copy( )
        ## The probability of the first ever visit to j from i being
        ##  on the m-th step: V^m_{ij} = \sum_{k\neq j} P_{ik} V^{m-1}_{kj}
        ##  (the elementwise product subtracts the k == j term).
        pi = P.dot( pi ) - np.multiply( P, pi_jj )
        ## The scout probability: the chance that the first visit is earlier
        ##  or exactly at the m-th step.
        Sc += pi
        i += 1
    ## BUG FIX: was `return SC ...` -- an undefined name, raising NameError
    ##  whenever cumulative=True; the accumulator is `Sc`.
    return Sc if cumulative else pi
# Disabled experiment (if False): spectral k-means on the scout-probability
# matrix of the first network -- kept for reference, never executed.
if False :
    A = matrices[ 0 ]
    # Row-normalised transition kernel of the first network.
    P = np.multiply( 1.0 / A.sum( axis = 1, dtype = np.float64 ).reshape(-1,1), A )
    S = scout(P, 100, False)
    l, v = np.linalg.eig( S )
    from scipy.cluster.vq import kmeans, vq
    K=4
    # Eigenvectors of the K largest eigenvalues, clustered with k-means.
    V = v[:,np.argsort( l )[ -K: ] ]
    O = vq(V,kmeans( V, K )[ 0 ])
    i = np.argsort( O[ 0 ] )
    plt.imshow( np.log(S[np.ix_(i,i)]))
    plt.colorbar( )
def floyd_warshall( A ) :
    """All-pairs shortest path lengths for a binary adjacency matrix A.

    Entries equal to 1 are treated as unit-weight edges; everything else is
    unreachable (inf). Returns an np.matrix of distances (0 on the diagonal).
    """
    ## Create a matrix object (not just a 2D array -- different broadcasting properties)
    ## FIX: `np.float` was removed from NumPy (>=1.24); the builtin `float` is
    ##  the exact alias it stood for.
    pi = np.matrix( A, dtype = float, copy = True )
    ## Fill as of yet unreachable vertices
    pi[ pi != 1 ] = np.inf
    ## And show that the shortest path to oneself is staying.
    np.fill_diagonal( pi, 0 )
    ## For each transitory vertex
    ## FIX: `xrange` is Python-2 only; `range` behaves identically here.
    for v in range( pi.shape[ 0 ] ) :
        ## Decide which is faster: to use a path without it, or to pass through it
        ##  (column + row broadcast gives all i->v->j relaxations at once).
        np.minimum( pi, pi[:,v] + pi[v,:], pi )
    ## Return the shortest path matrix
    return pi
import numpy as np
import networkx as nx
from matplotlib import pyplot as plt
%matplotlib inline
from scipy.io import loadmat
import warnings
warnings.filterwarnings( 'ignore' )
import scipy.io
import scipy.sparse as spma
import scipy.sparse.linalg as spla
# Yahoo Music band-similarity network; 'A' holds the weighted adjacency matrix.
# NOTE: np.float is a deprecated alias removed in NumPy >= 1.24.
data = scipy.io.loadmat('./data/hw4/music_data.mat')
A = spma.csc_matrix( data[ 'A' ], dtype = np.float )
def cluster( A, T = 100, Q = None, _index = None, mincut = False, depth = float( "inf" ), density_threshold = .05 ) :
    """Recursive spectral bisection of the similarity graph A (sparse CSC).

    T : do not split groups smaller than T elements
    Q : optional list -- every discovered cluster (array of master indices)
        is appended to it
    _index : master row indices carried through the recursion (internal)
    mincut : use the unnormalised Laplacian instead of the random-walk kernel
    depth : maximum recursion depth
    density_threshold : stop splitting once a group's edge density exceeds it
    Returns a permutation of 0..A.shape[0]-1 that groups cluster members.
    """
    ## If the recursion depth is exceeded return
    if depth <= 0 :
        return np.arange( A.shape[ 0 ] )
    ## Create master indices if necessary
    if _index is None :
        _index = np.arange( A.shape[ 0 ] )
    ## Compute the global similarity of each vertex/element
    deg = A.sum( axis = 1 ).getA1( )
    ## Detect non-isolated items
    nz, zz = np.where( deg != 0 )[ 0 ], np.where( deg == 0 )[ 0 ]
    if len( nz ) < T :
        return np.arange( len( deg ) )
    ## This fiddling with tocsr() and tocsc() makes slicing faster, since format
    ##  conversions are extremely fast.
    S = A[:,nz].tocsr()[nz,:].tocsc()
    try :
        if mincut :
            ## Compute the unnormalised laplacian
            L = spma.diags( deg[ nz ], offsets = 0 ) - S
            ## Get the eigenvector of the second least eigenvalue
            ## (np.float removed from NumPy; builtin float is its exact alias)
            l, e = spma.linalg.eigs( L, k = 2, which = 'SM',
                    v0 = np.ones( L.shape[ 1 ], float ) )
            e = e[ :, np.argmax( l ) ].real
        else :
            ## Compute the stochastic transition kernel of non-isolated vertices for the
            ##  normalised cut problem.
            L = spma.diags( 1.0 / deg[ nz ], offsets = 0 ).dot( S )
            ## Find the eigenvector corresponding to the 2nd largest eigenvalue
            l, e = spma.linalg.eigs( L, k = 2, v0 = np.ones( L.shape[ 1 ], float ) )
            ## Get the real part of the second largest eigenvector
            e = e[ :, np.argmin( l ) ].real
    ## FIX: was Python-2 `except Exception, e:` -- a SyntaxError on Python 3.
    ##  The bound exception was only used by commented-out debug code.
    except Exception :
        ## Return the global cluster if the eigenvalue computation failed to converge
        return np.arange( len( deg ) )
    ## Set the threshold: use the zero threshold
    t = 0
    ## Separate the items in two sets: left(n) and right (p)
    n, p = np.where( e <= t )[ 0 ], np.where( e > t )[ 0 ]
    N, P = S[:,n].tocsr()[n,:].tocsc( ), S[:,p].tocsr()[p,:].tocsc( )
    ## Compute the densities of the halves
    nd, pd = N.nnz, P.nnz
    nw, pw = len( n ) * ( len( n ) - 1.0 ), len( p ) * ( len( p ) - 1.0 )
    ## If there is enough elements in a set, split it.
    if ( len( p ) > T ) and ( density_threshold * pw > pd ) :
        p = p[ cluster( P, T = T, Q = Q, _index = _index[ nz[ p ] ],
                mincut = mincut, depth = depth - 1, density_threshold = density_threshold ) ]
    if ( len( n ) > T ) and ( density_threshold * nw > nd ) :
        n = n[ cluster( N, T = T, Q = Q, _index = _index[ nz[ n ] ],
                mincut = mincut, depth = depth - 1, density_threshold = density_threshold ) ]
    ## Update the queue of clusters
    if Q is not None :
        Q.append( _index[ nz[ p ] ] )
        Q.append( _index[ nz[ n ] ] )
        Q.append( _index[ zz ] )
    ## Reorder the clusters so that the denser groups are shifted to the left
    if nd*pw > pd*nw : p, n = n, p
    return np.concatenate( ( zz, nz[ n ], nz[ p ] ) )
# Overlay the reordered adjacency matrix at increasing recursion depths to see
# how the block structure refines. NOTE: Python 2 prints at the end.
fig = plt.figure( figsize = ( 16, 16 ) )
axs = fig.add_subplot(1, 1, 1, axisbg = 'black')
I = cluster( A, T = 5, depth = 3, density_threshold = .2 )
axs.spy( A[:,I].tocsr( )[I,:], marker = '.', markersize = 3, precision = 0, alpha = .35, color = 'magenta' )
I = cluster( A, T = 5, depth = 5, density_threshold = .2 )
axs.spy( A[:,I].tocsr( )[I,:], marker = '.', markersize = 2, precision = 0, alpha = .25, color = 'green' )
I = cluster( A, T = 5, depth = 7, density_threshold = .2 )
axs.spy( A[:,I].tocsr( )[I,:], marker = '.', markersize = 1, precision = 0, alpha = .25, color = 'cyan' )
I = cluster( A, T = 5, depth = np.inf, density_threshold = .2 )
axs.spy( A[:,I].tocsr( )[I,:], marker = '.', markersize = 1, precision = 0, alpha = .25, color = 'gold' )
# Unbounded-depth run collecting every discovered cluster into cluster_tree.
cluster_tree = list( )
I = cluster( A, T = 5, Q = cluster_tree, depth = np.inf, density_threshold = .2 )
## Compute the density (fraction of present edges) of each cluster.
den = np.zeros( len( cluster_tree ), np.float )
dia = A.diagonal( )
for i, c in enumerate( cluster_tree ) :
    ## Omit residual clusters
    if len( c ) < 2 : continue
    within = A[:,c].tocsr()[c,:].nnz # sum( ) - dia[c].sum( )
    weight = len( c ) * ( len( c ) - 1.0 )
    den[ i ] = within / weight
## Reorder clusters by density, densest first.
order = np.argsort( den )
top = order[ ~np.isnan( den[ order ] ) ][::-1]
plt.loglog( den[ top ], [ len( cluster_tree[ i ] ) for i in top ] )
plt.xlabel( 'Density (log)' ) ; plt.ylabel( 'Size (log)' )
# Highlight clusters in two density bands on top of the reordered adjacency.
fig = plt.figure( figsize = ( 16, 16 ) )
axs = fig.add_subplot(1, 1, 1, axisbg = 'black')
axs.spy( A[:,I].tocsr( )[I,:], marker = '.', markersize = 3, precision = 0, alpha = .50, color = 'cyan' )
H = spma.csc_matrix( A.shape, dtype = np.int8 )
for f in top[ np.where( np.logical_and( den[ top ] > .05, den[ top ] <= .2 ) ) ] :
    C = cluster_tree[ f ]
    H[ np.ix_( C, C ) ] = 1
axs.spy( H[:,I].tocsr( )[I,:], marker = '.', markersize = 1, precision = 0, alpha = .25, color = 'magenta' )
H = spma.csc_matrix( A.shape, dtype = np.int8 )
for f in top[ np.where( den[ top ] > .30 ) ] :
    C = cluster_tree[ f ]
    H[ np.ix_( C, C ) ] = 1
axs.spy( H[:,I].tocsr( )[I,:], marker = '.', markersize = 1, precision = 0, alpha = .15, color = 'yellow' )
# List the artists of the densest (> 0.3) clusters. Python 2 print statement.
for n, i in enumerate( top, 1 ) :
    c = cluster_tree[ i ]
    if len( c ) and den[ i ] > .3 :
        print "%#4d (%#4d, %0.3f)\t"%(n,len( c ), den[ i ]), ", ".join(sorted( [ s.strip() for s in data['artists'][ c ] ] ) ), "\n"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problems
Step2: The algorithm is designed to transform the graph connectivity in such a way as to disconnect different communities and concentrate connectivity within one.
Step3: Below are procedure for running the test, collecting the results and visualizing and printing them.
Step4: Create $10\times 7$ grid of $(\alpha, p)$ pairs, with $\alpha$ spaced evenly apart from $1.1$ to $10$ on the $\log$-scale, and $p$ running from $1$ to $20$ with step $3$.
Step5: This grid will be used throughout this task.
Step6: Below are the communities detected by the MCL with default parameters $\alpha=2$, $p=2$ and $\theta=10^{-8}$ for various leves of noise in the source data.
Step7: Case
Step8: Run the mcl for the first network.
Step9: Run the mcl for the second network.
Step10: And finally for the last one.
Step11: Analysis
Step12: As expected, the higher the $\alpha$ the less sensitive the algorithm is to the macrostructure. In contrast, a high scouting range $p$ permits longer paths within a community, thereby making the iterative procedure absorb smaller communities into larger ones.
Step13: Run the experiment
Step14: Show the resulting tables
Step15: The conclusions about the effects of $\alpha$ and $p$ match the $\theta=10^{-8}$ case. Higher $\theta$ narrowed the area, where the algorithm was able to detect any community.
Step16: Show the results
Step17: The significance threshold $\theta$ severely reduced the range of $\alpha$-$p$ pairs, for which MCT detects anything at all. Within that region, however, parameters affect the detection similarly to the previous cases.
Step18: Summary
Step19: Residual code from the failed attempt.
Step20: Maybe the all-pars shortest path matrix could yield some insight into the effects of the different parameter settings.
Step21: <hr />
Step22: Load Yahoo Music network. Edges in this network appear if enough number of users have given ratings to both music bands. Note, that edges are weighted with similarity of the ratings.
Step23: Consider a graph $G=(V,E)$ given by a similarity matrix $A$. The basic idea of recursive spectral clustering is to continue splitting $V$ into two clusters until either a cluster is depleted or a clique is detected. To cut a long story short, using the equivalent reformulation of the spectral clustering problem, the goal is to find an eigenvector corresponding to the second largest eigenvalue.
Step24: Results
Step25: Recursive spectral clustering does indeed reveal certain connectivity structure in the network even for a restriction on depth of the recusion tree to at most seven levels ($\leq$128 clusters).
Step26: Obviously, denser clusters have fewer nodes
Step27: Lets visualize the densest clusters
Step28: It is actually quite difficult to analyse how well the clustering performed, since we were not given any information on the artists' genre of music. However, moderate googling revealed the genres of some of the densest clusters (#rank)
|
7,827
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from fbprophet import Prophet
DATA_HOME_DIR = '/data/airline'
df = pd.read_csv(DATA_HOME_DIR+'/international-airline-passengers.csv',
sep=';',
names=['ds', 'y'],
header=0,
parse_dates=[0],
nrows=144,
)
df.head(3)
df.info()
df['y'] = np.log(df['y'])
m = Prophet(weekly_seasonality=False)
m.fit(df)
future = m.make_future_dataframe(periods=36, freq='M')
future.tail()
forecast = m.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
m.plot(forecast)
m.plot_components(forecast)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The input to Prophet is always a dataframe with two columns
Step2: It looks like we have a exponential growth trend in the data, so in order to accomodate for the linear fitting we take the log.
Step3: We only have monthly data, so certainly there will be no weekly seasonality in the date. Also forecasting must take this into account and choose the right frequency.
Step4: Predictions are then made on a dataframe with a column ds containing the dates for which a prediction is to be made. You can get a suitable dataframe that extends into the future a specified number of days using the helper method Prophet.make_future_dataframe. By default it will also include the dates from the history, so we will see the model fit as well.
Step5: The predict method will assign each row in future a predicted value which it names yhat. If you pass in historical dates, it will provide an in-sample fit. The forecast object here is a new dataframe that includes a column yhat with the forecast, as well as columns for components and uncertainty intervals.
Step6: If you want to see the forecast components, you can use the Prophet.plot_components method. By default you’ll see the trend, yearly seasonality, and weekly seasonality of the time series. If you include holidays, you’ll see those here, too.
|
7,828
|
<ASSISTANT_TASK:>
Python Code:
import moldesign as mdt
import moldesign.units as u
mdt.configure()
molecule = mdt.read('data/butane.xyz')
molecule
viewer = molecule.draw()
viewer # we tell Jupyter to draw the viewer by putting it on the last line of the cell
print(viewer.selected_atoms)
molecule.set_energy_model(mdt.models.RHF, basis='sto-3g')
properties = molecule.calculate()
print(properties.keys())
print('Energy: ', properties['potential_energy'])
molecule.draw_orbitals()
mintraj = molecule.minimize()
mintraj.draw_orbitals()
molecule.write('my_first_molecule.xyz')
mintraj.write('my_first_minimization.P.gz')
mdt.widgets.GeometryBuilder(molecule)
molecule.calculate_potential_energy()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Optional
Step2: 2. Read in a molecular structure
Step3: Jupyter notebooks will automatically print out the value of the last statement in any cell. When you evaluate a Molecule, as in the cell below, you'll get some quick summary data
Step4: 3. Visualize it
Step5: Try clicking on some of the atoms in the visualization you've just created.
Step6: 4. Simulate it
Step7: 5. Minimize it
Step8: 6. Write it
Step9: 7. Play with it
|
7,829
|
<ASSISTANT_TASK:>
Python Code:
platform = 'lendingclub'
use_cuda = True
# Global tensor type used throughout for GPU placement.
dtype = torch.cuda.FloatTensor
save_path = "model_dump/nn_1_0_0/"
store = pd.HDFStore(
    dc.home_path+'/justin_tinkering/data_science/lendingclub/{0}_store.h5'.
    format(platform),
    append=True)
loan_info = store['train_filtered_columns']
columns = loan_info.columns.values
# checking dtypes to see which columns need one hotting, and which need null or not
to_one_hot = []
to_null_or_not = []
do_nothing = []
for col in columns:
    if loan_info[col].dtypes == np.dtype('O'):
        # object dtype -> categorical, needs one-hot encoding
        # print(col, loan_info[col].isnull().value_counts(dropna=False).to_dict())
        to_one_hot.append(col)
    elif len(loan_info[col].isnull().value_counts(dropna=False)) > 1:
        # numeric with missing values -> needs a null-indicator treatment
        # print(col, loan_info[col].isnull().value_counts(dropna=False).to_dict())
        to_null_or_not.append(col)
    else:
        # numeric, fully populated -> usable as-is
        # print(col, loan_info[col].isnull().value_counts(dropna=False).to_dict())
        do_nothing.append(col)
train_X, train_y, mean_series, std_dev_series = data_prep.process_data_train(
    loan_info)
class TrainDataset(Dataset):
    """Minimal map-style dataset pairing feature rows with target rows."""

    def __init__(self, data, targets):
        # Keep references only -- no copies are made.
        self.data, self.targets = data, targets

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # The idx-th (feature row, target row) pair.
        return (self.data[idx, :], self.targets[idx, :])
def get_loader(dataset, use_cuda, batch_size=6400, shuffle=True):
    """Wrap `dataset` in a DataLoader; pin host memory when CUDA is in use.

    BUG FIX: the original hard-coded shuffle=True inside the DataLoader call,
    silently ignoring the `shuffle` argument.
    """
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, pin_memory=use_cuda)
# Wrap the prepared numpy arrays as a dataset and batched loader.
train_dataset = TrainDataset(train_X.values, train_y.values)
train_loader = get_loader(train_dataset, use_cuda)
# %%writefile model_dump/nn_1_0_0/net_class.py
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# from torch.autograd import Variable
# import numpy as np
# dtype = torch.FloatTensor
# nn_input_dim = 223
# hly1_n = 300
# hly2_n = 400
# hly3_n = 300
# hly4_n = 100
# nn_output_dim = 1
# class Net(nn.Module):
# def __init__(self):
# super(Net, self).__init__()
# self.hl1 = nn.Linear(nn_input_dim, hly1_n)
# self.hl2 = nn.Linear(hly1_n, hly2_n)
# self.hl3 = nn.Linear(hly2_n, hly3_n)
# self.hl4 = nn.Linear(hly3_n, hly4_n)
# self.out = nn.Linear(hly4_n, nn_output_dim)
# def forward(self, x):
# x = F.leaky_relu(self.hl1(x))
# x = F.leaky_relu(self.hl2(x))
# x = F.leaky_relu(self.hl3(x))
# x = F.leaky_relu(self.hl4(x))
# x = self.out(x)
# return x
# def torch_version(df_inputs, net):
# input = Variable(torch.from_numpy(df_inputs.values)).type(dtype)
# return np.round(net(input).data.cpu().numpy(),5)
# Architecture constants: 223 inputs -> 300 -> 400 -> 300 -> 100 -> 1 output.
nn_input_dim = 223
hly1_n = 300
hly2_n = 400
hly3_n = 300
hly4_n = 100
nn_output_dim = 1

class Net(nn.Module):
    """Four-hidden-layer leaky-ReLU MLP regressor with a linear output head."""

    def __init__(self):
        super(Net, self).__init__()
        # Attribute names are part of the checkpoint format (state_dict keys)
        # and must not be renamed.
        self.hl1 = nn.Linear(nn_input_dim, hly1_n)
        self.hl2 = nn.Linear(hly1_n, hly2_n)
        self.hl3 = nn.Linear(hly2_n, hly3_n)
        self.hl4 = nn.Linear(hly3_n, hly4_n)
        self.out = nn.Linear(hly4_n, nn_output_dim)

    def forward(self, x):
        # Leaky ReLU after every hidden layer; no activation on the output.
        for hidden in (self.hl1, self.hl2, self.hl3, self.hl4):
            x = F.leaky_relu(hidden(x))
        return self.out(x)
# Instantiate network, MSE loss and Adam with L2 regularisation; move to GPU.
net = Net()
params = list(net.parameters())
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=0.001)
if use_cuda:
    net.cuda()
    criterion.cuda()
# Training loop with live loss plotting (old PyTorch <0.4 API: Variable,
# loss.data[0]). Checkpoints every 20 epochs from epoch 100 on.
n_epochs = 500
epoch_list = []
loss_list = []
fig = plt.gcf()
fig.show()
fig.canvas.draw()
for epoch in range(n_epochs):
    running_loss = 0
    for i, data in enumerate(train_loader):
        inputs, targets = data
        # wrap in Variable and move to GPU (global `dtype` is cuda.FloatTensor)
        inputs, targets = Variable(inputs.cuda()).type(dtype), Variable(targets.cuda()).type(dtype)
        # in your training loop:
        optimizer.zero_grad() # zero the gradient buffers
        output = net(inputs)
        loss = criterion(output, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.data[0]
    # NOTE(review): bare except -- only an empty loss_list is expected here,
    # but this also masks any other error; the sentinel stands in for "no
    # previous loss yet".
    try:
        last_loss = loss_list[-1]
    except:
        last_loss = 9999999999999
    # Loss spikes (> 2x previous epoch) are excluded from the plotted series;
    # training itself continues regardless.
    if running_loss > (2*last_loss):
        pass
    else:
        epoch_list.append(epoch)
        loss_list.append(running_loss)
    if epoch % 1 == 0:
        plt.plot(epoch_list, loss_list)
        plt.title("Epoch: {0}".format(epoch))
        fig.canvas.draw()
    # Save a checkpoint at epochs 100, 120, 140, ... (& on bools acts as and).
    if (epoch >= 99) & ((epoch+1) % 20 == 0):
        torch.save(net.state_dict(), save_path+'1.0.0_e{0}'.format(epoch+1))
# np_hl1_weight = net.hl1.weight.data.numpy()
# np_hl1_bias = net.hl1.bias.data.numpy()
# np_hl2_weight = net.hl2.weight.data.numpy()
# np_hl2_bias = net.hl2.bias.data.numpy()
# np_out_weight = net.out.weight.data.numpy()
# np_out_bias = net.out.bias.data.numpy()
# def np_version(df_inputs):
# np_hl1_z = df_inputs.dot(np_hl1_weight.T) + np_hl1_bias
# np_hl1_a = np.maximum(.01*np_hl1_z, np_hl1_z)
# np_hl2_z = np_hl1_a.dot(np_hl2_weight.T) + np_hl2_bias
# np_hl2_a = np.maximum(.01*np_hl2_z, np_hl2_z)
# np_out = np_hl2_a.dot(np_out_weight.T) + np_out_bias
# return np_out
def torch_version(df_inputs, net):
    """Score a DataFrame with `net` and return the predictions as numpy.

    Runs a forward pass on the GPU (relies on the module-level `dtype`,
    torch.cuda.FloatTensor, so CUDA is required), moves the result back to
    the CPU and rounds to 5 decimal places.
    """
    input = Variable(torch.from_numpy(df_inputs.values)).type(dtype)
    return np.round(net(input).data.cpu().numpy(),5)
# IPython magic: benchmark the torch scorer (the numpy variant is retired).
#%timeit np_version(standardized)
%timeit torch_version(train_X, net)
# Load evaluation data from the HDF store and score train/test sets.
store.open()
test = store['test_filtered_columns']
train = store['train_filtered_columns']
loan_npv_rois = store['loan_npv_rois']
default_series = test['target_strict']
results = store['results']
store.close()
# Same preprocessing as training; target is the 10%-discount-rate NPV ROI.
train_X, train_y = data_prep.process_data_test(train)
train_y = train_y['npv_roi_10'].values
test_X, test_y = data_prep.process_data_test(test)
test_y = test_y['npv_roi_10'].values
# regr = joblib.load('model_dump/model_0.2.1.pkl')
regr_version = '1.0.0'
test_yhat = torch_version(test_X, net)
train_yhat = torch_version(train_X, net)
test_mse = mean_squared_error(test_yhat,test_y)
train_mse = mean_squared_error(train_yhat,train_y)
def eval_models_net(trials, port_size, available_loans, net, regr_version, test, loan_npv_rois,
                    default_series):
    """Monte-Carlo portfolio simulation of the scoring model.

    For each trial, sample `available_loans` loans, score them with `net`,
    pick the `port_size` highest-scoring ones, and record the portfolio's
    mean NPV ROI per discount rate plus its default fraction.
    `regr_version` is unused but kept for signature compatibility with the
    other model evaluators. Returns a DataFrame with one row per trial.
    """
    results = {}
    pct_default = {}
    test_copy = test.copy()
    for trial in tqdm_notebook(np.arange(trials)):
        loan_ids = np.random.choice(
            test_copy.index.values, available_loans, replace=False)
        loans_to_pick_from = test_copy.loc[loan_ids, :]
        scores = torch_version(loans_to_pick_from, net)
        scores_series = pd.Series(dict(zip(loan_ids, scores)))
        scores_series.sort_values(ascending=False, inplace=True)
        # BUG FIX: was hard-coded `[:900]`, silently ignoring port_size
        # (identical behaviour for the existing port_size=900 call site).
        picks = scores_series[:port_size].index.values
        results[trial] = loan_npv_rois.loc[picks, :].mean().to_dict()
        pct_default[trial] = (default_series.loc[picks].sum()) / port_size
    pct_default_series = pd.Series(pct_default)
    results_df = pd.DataFrame(results).T
    results_df['pct_def'] = pct_default_series
    return results_df
# as per done with baseline models, say 3000 loans available
# , pick 900 of them
trials = 20000
port_size = 900
available_loans = 3000
model_results = eval_models_net(trials, port_size, available_loans, net, regr_version, test_X, loan_npv_rois, default_series)
# Tag this model's results with a (discount_rate, model) MultiIndex so they
# can be joined with the previously stored results.
multi_index = []
for col in model_results.columns.values:
    multi_index.append((str(col),regr_version))
append_results = model_results.copy()
append_results.columns = pd.MultiIndex.from_tuples(multi_index, names = ['discount_rate', 'model'])
multi_index_results = []
for col in results.columns.values:
    multi_index_results.append((str(col[0]), col[1]))
results.columns = pd.MultiIndex.from_tuples(multi_index_results, names = ['discount_rate', 'model'])
full_results = results.join(append_results)
full_results.sort_index(axis=1, inplace=True)
full_results.describe()
# Persist combined results and pull the model-info table.
store.open()
store['results'] = full_results
model_info = store['model_info']
store.close()
# dump the model
# joblib.dump(regr, 'model_dump/model_0.2.1.pkl')
joblib.dump((mean_series, std_dev_series), 'model_dump/mean_stddev.pkl')
test_mse
train_mse
now = time.strftime("%Y_%m_%d_%Hh_%Mm_%Ss")
# info to stick in detailed dataframe describing each model
model_info_dict = {'model_version': '1.0.0',
                   'target': 'npv_roi_10',
                   'weights': 'None',
                   'algo_model': 'feedforward NN',
                   'hyperparams': "nn_input_dim = 223, hly1_n = 300, hly2_n = 400, hly3_n = 300, hly4_n = 100, nn_output_dim = 1, optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=0.001)",
                   'cost_func': 'criterion = nn.MSELoss(),',
                   'useful_notes': 'test_mse: 0.0644643, train_mse: 0.0636180, epoch_500',
                   'date': now}
model_info_df = pd.DataFrame(model_info_dict, index = ['1.0.0'])
# NOTE: .ix is a long-deprecated pandas indexer (removed in pandas >= 1.0).
model_info.ix['1.0.0',:] = model_info_df.values
model_info.sort_index(inplace=True)
model_info
store.open()
store.append(
    'model_info',
    model_info,
    data_columns=True,
    index=True,
    append=False,
)
store.close()
# Sanity-check the score distributions on both splits.
train_preds = pd.Series(train_yhat.ravel())
test_preds = pd.Series(test_yhat.ravel())
train_preds.hist(bins=50)
test_preds.hist(bins=50)
train_preds.describe()
test_preds.describe()
train_preds.value_counts()
test_preds.value_counts()
# try:
#     results = results.join(append_results)
# except ValueError:
#     results.loc[:, (slice(None), slice('1.0.0','1.0.0'))] = append_results
#     results.sort_index(axis=1, inplace = True)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Until I figure out a good imputation method (e.g. bayes PCA), just drop columns with null still
Step2: instantiate network
Step3: get the weights and biases of the nn into np since at this size np is faster (correction, pytorch was faster)
Step4: check that they output the same and speedtest (pytorch was faster)
Step5: Examine performance on test set
Step6: Making model info and saving it
Step7: Examine scores distributions
|
7,830
|
<ASSISTANT_TASK:>
Python Code:
def resp_elas(m,c,k, cC,cS,w, F, x0,v0):
    """Closed-form response of an underdamped elastic SDOF oscillator.

    m, c, k : mass, damping coefficient, stiffness
    cC, cS  : cosine / sine amplitudes of the harmonic load at frequency w
              (load = cC*cos(w t) + cS*sin(w t) + F)
    F       : constant load component
    x0, v0  : initial displacement and velocity
    Returns the pair of functions (x(t), v(t)).
    """
    # Natural frequency, frequency ratio, damping ratio, damped frequency.
    wn2 = k/m ; wn = sqrt(wn2) ; beta = w/wn
    z = c/(2*m*wn)
    wd = wn*sqrt(1-z*z)
    # Particular (steady-state) solution: xi(t) = R sin(w t) + S cos(w t) + D
    det = (1.-beta**2)**2+(2*beta*z)**2
    R = ((1-beta**2)*cS + (2*beta*z)*cC)/det/k
    S = ((1-beta**2)*cC - (2*beta*z)*cS)/det/k
    D = F/k
    # Homogeneous constants fitted so that x(0) = x0 and v(0) = v0.
    A = x0-S-D
    B = (v0+z*wn*A-w*R)/wd
    def x(t):
        # Decaying free vibration plus the steady-state part.
        return exp(-z*wn*t)*(A*cos(wd*t)+B*sin(wd*t))+R*sin(w*t)+S*cos(w*t)+D
    def v(t):
        # Exact time derivative of x(t).
        return (-z*wn*exp(-z*wn*t)*(A*cos(wd*t)+B*sin(wd*t))
                +wd*exp(-z*wn*t)*(B*cos(wd*t)-A*sin(wd*t))
                +w*(R*cos(w*t)-S*sin(w*t)))
    return x,v
def resp_yield(m,c, cC,cS,w, F, x0,v0):
    """Response of the SDOF system during yielding (no stiffness term).

    Solves m x'' + c x' = cC*cos(w t) + cS*sin(w t) + F, where F typically
    carries the (constant) resisting yield force. x0, v0 are the state at
    yield onset; t is measured from that instant. Returns (x(t), v(t)).
    """
    # Particular solution: csi(t) = R sin(w t) + S cos(w t) + Q t
    Q = F/c
    det = w**2*(c**2+w**2*m**2)
    R = (+w*c*cC-w*w*m*cS)/det
    S = (-w*c*cS-w*w*m*cC)/det
    # x(t) = A exp(-c t/m) + B + R sin(w t) + S cos(w t) + Q t
    # v(t) = - c A/m exp(-c t/m) + w R cos(w t) - w S sin(w t) + Q
    #
    # v(0) = -c A / m + w R + Q = v0
    A = m*(w*R + Q - v0)/c
    # x(0) = A + B + S = x0
    B = x0 - A - S
    def x(t):
        return A*exp(-c*t/m)+B+R*sin(w*t)+S*cos(w*t)+Q*t
    def v(t):
        return -c*A*exp(-c*t/m)/m+w*R*cos(w*t)-w*S*sin(w*t)+Q
    return x,v
def bisect(f,val,x0,x1):
    """Solve f(x) == val by bisection on the bracket [x0, x1].

    Assumes f(x0)-val and f(x1)-val have opposite signs.  Converges when
    |f(mid) - val| < 1e-8 (same function-value tolerance as before).
    Rewritten iteratively: the original recursed once per halving, which
    can exceed the recursion limit on slowly converging brackets; a cap
    of 200 halvings shrinks any bracket far below float resolution.
    """
    lo, hi = x0, x1
    f_lo = f(lo) - val
    for _ in range(200):
        mid = (lo + hi)/2.0
        f_mid = f(mid) - val
        if abs(f_mid) < 1e-8:
            return mid
        if f_lo*f_mid > 0:
            lo, f_lo = mid, f_mid   # root lies in the upper half
        else:
            hi = mid                # root lies in the lower half
    return (lo + hi)/2.0            # best estimate if tolerance never met
mass = 1000. # kg
k = 40000. # N/m
zeta = 0.03 # damping ratio
fy = 2500. # N
print('Limit displacement Uy =', fy*1000/k, 'mm')
damp = 2*zeta*sqrt(k*mass)
xy = fy/k # m
t1 = 0.3 # s
w = pi/t1 # rad/s
Po = 6000. # N
x0=0.0 # m
v0=0.0 # m/s
x_next, v_next = resp_elas(mass,damp,k, 0.0,Po,w, 0.0, x0,v0)
t_yield = bisect(x_next, xy, 0.0, t1)
print(t_yield, x_next(t_yield)*k)
t_el = linspace( 0.0, t_yield, 201)
x_el = vectorize(x_next)(t_el)
v_el = vectorize(v_next)(t_el)
# ------------------------------
figure(0)
plot(t_el,x_el,
(0,0.25),(xy,xy),'--b',
(t_yield,t_yield),(0,0.0699),'--b')
title("$x_{el}(t)$")
xlabel("Time, s")
ylabel("Displacement, m")
# ------------------------------
figure(1)
plot(t_el,v_el)
title("$\dot x_{el}(t)$")
xlabel("Time, s")
ylabel("Velocity, m/s")
x0=x_next(t_yield)
v0=v_next(t_yield)
print(x0, v0)
cS = Po*cos(w*t_yield)
cC = Po*sin(w*t_yield)
print(Po*sin(w*0.55), cS*sin(w*(0.55-t_yield))+cC*cos(w*(0.55-t_yield)))
x_next, v_next = resp_yield(mass, damp, cC,cS,w, -fy, x0,v0)
t_y1 = linspace(t_yield, t1, 101)
x_y1 = vectorize(x_next)(t_y1-t_yield)
v_y1 = vectorize(v_next)(t_y1-t_yield)
figure(3)
plot(t_el,x_el, t_y1,x_y1,
(0,0.25),(xy,xy),'--b',
(t_yield,t_yield),(0,0.0699),'--b')
xlabel("Time, s")
ylabel("Displacement, m")
# ------------------------------
figure(4)
plot(t_el, v_el, t_y1, v_y1)
xlabel("Time, s")
ylabel("Velocity, m/s")
x0 = x_next(t1-t_yield)
v0 = v_next(t1-t_yield)
print(x0, v0)
x_next, v_next = resp_yield(mass, damp, 0, 0, w, -fy, x0, v0)
t2 = t1 + bisect( v_next, 0.0, 0, 0.3)
print(t2)
t_y2 = linspace( t1, t2, 101)
x_y2 = vectorize(x_next)(t_y2-t1)
v_y2 = vectorize(v_next)(t_y2-t1)
print(x_next(t2-t1))
figure(5)
plot(t_el,x_el, t_y1,x_y1, t_y2, x_y2,
(0,0.25),(xy,xy),'--b',
(t_yield,t_yield),(0,0.0699),'--b')
xlabel("Time, s")
ylabel("Displacement, m")
# ------------------------------
figure(6)
plot(t_el, v_el, t_y1, v_y1, t_y2, v_y2)
xlabel("Time, s")
ylabel("Velocity, m/s")
x0 = x_next(t2-t1) ; v0 = 0.0
x_next, v_next = resp_elas(mass,damp,k, 0.0,0.0,w, k*x0-fy, x0,v0)
t_e2 = linspace(t2,4.0,201)
x_e2 = vectorize(x_next)(t_e2-t2)
v_e2 = vectorize(v_next)(t_e2-t2)
# ------------------------------
figure(7) ;
plot(t_el, x_el, '-b',
t_y1, x_y1, '-r',
t_y2, x_y2, '-r',
t_e2, x_e2, '-b',
(0.6, 4.0), (x0-xy, x0-xy), '--y')
title("In blue: elastic phases.\n"+
"In red: yielding phases.\n"+
"Dashed: permanent plastic deformation.")
xlabel("Time, s")
ylabel("Displacement, m")
def make_p(p0,t1):
    """make_p(p0, t1) returns a half-sine impulse load function p(t).

    Fix: the docstring had lost its quotes in the source, leaving a bare
    prose statement inside the def -- a SyntaxError.
    """
    def p(t):
        """p0*sin(pi*t/t1) for 0 <= t < t1, zero afterwards."""
        if t < t1:
            return p0*sin(t*pi/t1)
        else:
            return 0.0
    return p
def make_kt(k,fy):
    """Build the tangent-stiffness function of an elastic-perfectly-plastic
    spring with elastic stiffness k and yield force fy.

    The returned kt(u, v, up) maps displacement u, velocity v and current
    plastic offset up to a pair (tangent stiffness, updated plastic offset).

    NOTE(review): kt reads the module-level global ``uy`` (yield
    displacement, uy = fy/k, assigned later in the script) -- confirm it is
    set before the first call.
    """
    def kt(u,v,up):
        f=k*(u-up)  # spring force on the current elastic branch
        if (-fy)<f<fy: return k,up  # inside the elastic range: full stiffness
        if fy<=f and v>0: up=u-uy;return 0,up  # yielding in tension: zero stiffness
        if fy<=f and v<=0: up=u-uy;return k,up  # reversal at +fy: elastic unloading
        if f<=(-fy) and v<0: up=u+uy;return 0,up  # yielding in compression
        else: up=u+uy;return k,up  # reversal at -fy: elastic unloading
    return kt
# Exercise from lesson 04
#
mass = 1000.00 # kilograms
k = 40000.00 # Newtons per metre
zeta = 0.03 # zeta is the damping ratio
fy = 2500.00 # yelding force, Newtons
t1 = 0.30 # half-sine impulse duration, seconds
p0 = 6000.00 # half-sine impulse peak value, Newtons
uy = fy/k # yelding displacement, metres
# using the above constants, define the loading function
p=make_p(p0,t1)
# the following function, given the final displacement, the final
# velocity and the initial plastic deformation returns a) the tangent
# stiffness b) the final plastic deformation
kt=make_kt(k,fy)
# we need the damping coefficient "c", to compute its value from the
# damping ratio we must first compute the undamped natural frequency
wn=sqrt(k/mass) # natural frequency of the undamped system
damp=2*mass*wn*zeta # the damping coefficient
# the time step
h=0.005
# required duration for the response
t_end = 4.0
# the number of time steps to arrive at t_end
nsteps=int((t_end+h/100)/h)+1
# the maximum number of iterations in the Newton-Raphson procedure
maxiters = 30
# using the constant acceleration algorithm
# below we define the relevant algorithmic constants
gamma=0.5
beta=1./4.
gb=gamma/beta
a=mass/(beta*h)+damp*gb
b=0.5*mass/beta+h*damp*(0.5*gb-1.0)
t0=0.0
u0=0.0
up=0.0
v0=0.0
p0=p(t0)
(k0, up)=kt(u0,v0,up)
a0=(p0-damp*v0-k0*(u0-up))/mass
time = []; disp = []
for i in range(nsteps):
time.append(t0); disp.append(u0)
# advance time, next external load value, etc
t1 = t0 + h
p1 = p(t1)
Dp = p1 - p0
Dp_= Dp + a*v0 + b*a0
k_ = k0 + gb*damp/h + mass/(beta*h*h)
# we prepare the machinery for the modified Newton-Raphson
# algorithm. if we have no state change in the time step, then the
# N-R algorithm is equivalent to the standard procedure
u_init=u0; v_init=v0 # initial state
f_spring=k*(u0-up) # the force in the spring
DR=Dp_ # the unbalanced force, initially equal to the
# external load increment
for j in range(maxiters):
Du=DR/k_ # the disp increment according to the initial stiffness
u_next = u_init + Du
v_next = v_init + gb*Du/h - gb*v_init + h*(1.0-0.5*gb)*a0
# we are interested in the total plastic elongation
oops,up=kt(u_next,v_next,up)
# because we need the spring force at the end
# of the time step
f_spring_next=k*(u_next-up)
# so that we can compute the fraction of the
# incremental force that's equilibrated at the
# end of the time step
df=f_spring_next-f_spring+(k_-k0)*Du
# and finally the incremental forces unbalanced
# at the end of the time step
DR=DR-df
# finish updating the system state
u_init=u_next; v_init=v_next; f_spring=f_spring_next
# if the unbalanced load is small enough (the
# criteria used in practical programs are
# energy based) exit the loop - note that we
# have no plasticization/unloading DR==0 at the
# end of the first iteration
if abs(DR)<fy*1E-6: break
# now the load increment is balanced by the spring force and
# increments in inertial and damping forces, we need to compute the
# full state at the end of the time step, and to change all
# denominations to reflect the fact that we are starting a new time step.
Du=u_init-u0
Dv=gamma*Du/(beta*h)-gamma*v0/beta+h*(1.0-0.5*gamma/beta)*a0
u1=u0+Du ; v1=v0+Dv
k1,up=kt(u1,v1,up)
a1=(p(t1)-damp*v1-k*(u1-up))/mass
t0=t1; v0=v1; u0=u1 ; a0=a1 ; k0=k1 ; p0=p1
figure(8)
plot(time[::4],disp[::4],'xr')
plot(t_el, x_el, '-b',
t_y1, x_y1, '-r',
t_y2, x_y2, '-r',
t_e2, x_e2, '-b',
(0.6, 4.0), (x0-xy, x0-xy), '--y')
title("Continuous line: exact response.\n"+
"Red crosses: constant acceleration + MNR.\n")
xlabel("Time, s")
ylabel("Displacement, m");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plastic response
Step2: An utility function
Step3: The system parameters
Step4: Derived quantities
Step5: Load definition
Step6: The actual computations
Step7: Yielding time is
Step8: Forced response in elastic range is
Step9: Preparing for EP response
Step10: now, the load must be expressed in function of a restarted time,
Step11: Now we generate the displacement and velocity functions for the yielded phase,
Step12: At this point I must confess that I have already peeked the numerical solution,
Step13: In the next phase, still it is $\dot x> 0$ so that
Step14: Elastic unloading
Step15: now we are ready to plot the whole response
Step17: Numerical solution
Step18: and also a function that, given the displacement,
Step19: Problem data
Step20: Initialize the algorithm
Step21: System state initialization
Step22: Iteration
Step23: Plotting our results
|
7,831
|
<ASSISTANT_TASK:>
Python Code:
import frame_methods
import engine_methods as em
import itertools
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
import numpy as np
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
### Setup frame environment
y_centerline = 200
left_buffer = 0
base_bom = frame_methods.read_bom(bom_name = "bom1.json")
part_shelf = base_bom.items
frame = frame_methods.frame_layout(bom_frame_spec = base_bom.frame_spec, frame_width = 35)
# Define shape variable
# Get theoretical max-forward CG_x:
min_length = em.get_min_collapsible_x(part_shelf, strategy = 'avg')
print("Min Length:", min_length)
# Plot scoring method
CG_x = np.arange(0, 500, 0.01)
CG_x_precalc = (CG_x - min_length) / (frame.frame_length - min_length)
# Interactive widget: exponential CG_x score, exp(-normalized position / shape).
# The red vertical line marks the theoretical minimum collapsible length.
@interact(CG_x_shape=(0.05, 1, 0.05))
def plt_CG_x(CG_x_shape = 0.3):
    # CG_x_precalc: CG_x normalized over [min_length, frame_length] (module global)
    CG_x_score = np.exp(-CG_x_precalc / CG_x_shape)
    plt.plot(CG_x, CG_x_score)
    plt.plot((min_length, min_length), (0, max(CG_x_score)), 'red')
    plt.show()
def plt_CG_x_logit(s, min_length):
    """Plot a reversed logistic CG_x score with steepness s.

    The logistic midpoint sits one tenth of the frame length beyond
    min_length; the red vertical line marks min_length itself.
    """
    u = min_length + (frame.frame_length / 10)
    CG_x_logit = 1 - (1 / (1 + np.exp(-(CG_x- u)/s)))
    plt.plot(CG_x, CG_x_logit)
    plt.plot((min_length, min_length), (0, max(CG_x_logit)), 'red')
    plt.show()
# Slider over the logistic steepness s; min_length is bound via the widget call
interact(plt_CG_x_logit, s = (1, 500, 1))
# Interactive widget: Gaussian CG_y score centered on the frame midline,
# normalized to peak at 1; spread scales with frame_width * CG_y_shape.
@interact(CG_y_shape = (0.05, 1, 0.05))
def plt_CG_Y(CG_y_shape = 1.0):
    rv = norm(loc = 0.0, scale = frame.frame_width * CG_y_shape)
    x = np.arange(-60, 60, .1)
    # Divide by the peak density so the score tops out at 1
    CG_y_score = rv.pdf(x) / max(rv.pdf(x))
    #plot the pdfs of these normal distributions
    plt.plot(x, CG_y_score)
    plt.show()
# Collisions are scored on an exponential curve, dictated by a shape parameter.
# Fix: this sentence was an unquoted docstring remnant at module level --
# a SyntaxError -- now a comment, with a proper docstring inside the function.
@interact(collision_shape = (0.01, 1, 0.01))
def plt_col(collision_shape = 0.15):
    """Plot the collision score exp(-(n/total)/shape) over possible collision counts."""
    collisions = np.arange(0, len(part_shelf), 1)
    collision_score = np.exp(-(collisions/len(part_shelf))/collision_shape)
    plt.plot(collisions, collision_score)
    plt.show()
seconds = np.arange(0, 300, 1/30)
time_penalty = seconds * -0.001
plt.plot(seconds, time_penalty)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We must first setup a sample environment with a frame and components.
Step2: Scoring
Step3: Weight Distribution- Y
Step4: Collisions
Step5: Time
|
7,832
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.time import Time
import astropy.units as u
from astropy.coordinates import EarthLocation
import pytz
import datetime
from astroplan import Observer
# Set up an observer at ~Subaru
location = EarthLocation.from_geodetic(-155.4*u.deg, 19.8*u.deg)
obs = Observer(location=location, timezone=pytz.timezone('US/Hawaii'))
# Pick a local (Hawaii) time to observe: midnight
local_naive_datetime = datetime.datetime(2015, 7, 14, 0)
# What is the astropy.time.Time equivalent for this datetime?
# (a naive datetime is interpreted in the observer's timezone, US/Hawaii)
astropy_time = obs.datetime_to_astropy_time(local_naive_datetime)
print('astropy.time.Time (UTC):', astropy_time)
# Round-trip back to a (now timezone-aware) datetime and confirm it matches
localized_datetime = obs.astropy_time_to_datetime(astropy_time)
print('datetime:', localized_datetime)
print('new datetime equivalent to original naive datetime?:',
local_naive_datetime == localized_datetime.replace(tzinfo=None))
# Remote observer on the US East Coast: a localized datetime carries its own
# timezone, which datetime_to_astropy_time uses instead of obs.timezone
east_coast_datetime = pytz.timezone('US/Eastern').localize(datetime.datetime(2015, 7, 14, 6))
east_coast_astropy_time = obs.datetime_to_astropy_time(east_coast_datetime)
print('Convert local East Coast time to UTC:', east_coast_astropy_time)
print('Equivalent to original astropy time?:', east_coast_astropy_time == astropy_time)
# Pitfall demo: with pytz zones, passing tzinfo= in the datetime constructor
# attaches the zone's base offset, whereas tz.localize(dt) applies the correct
# UTC offset for that date -- the two printed values differ (see pytz docs).
tzinfo_kwarg = datetime.datetime(2015, 7, 14, 6, tzinfo=pytz.timezone('US/Eastern'))
localized = pytz.timezone('US/Eastern').localize(datetime.datetime(2015, 7, 14, 6))
print('with tz assigned in kwarg:', tzinfo_kwarg)
print('with localization by tz.localize(dt):', localized)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convert that astropy.time.Time back to a localized datetime, arriving back at the original datetime (only this one is localized)
Step2: Let's say the Subaru observer is remotely observing from the East Coast. Let's convert their local time (Eastern) to an astropy time. Since this datetime is localized, datetime_to_astropy_time will use the datetime's timezone (rather than assuming self.timezone)
Step3: Warning
|
7,833
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.ensemble import AdaBoostClassifier as AdaBoost
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.metrics import hamming_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.decomposition import PCA
from sklearn import preprocessing
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from gensim import corpora, models
import gensim
import string
import time
from datetime import datetime
from __future__ import division
%cd ./Train_Data_Version_1/
features = pd.read_csv('features_V1.csv')
features.head(5)
labels = pd.read_csv('multilabels.csv')
labels.head(5)
#delete text only data
features = features.drop(features.columns[[0, 1, 2, 3, 5, 6, 12]], axis=1)
features.head(5)
#convert release date string to numeric year
rd = features['release_date'].str[:4]
rd = pd.to_numeric(rd)
features['release_date'] = rd
features.head()
# Count missing data to see how many movies we lose by dropping NAs:
features.shape[0] - features.dropna().shape[0]
#make a df with no important features missing
feat2 = pd.concat([features, labels], axis=1)
feat2 = feat2.dropna()
labels2 = feat2.ix[:,514:521]
#drop adult since it is always false
del feat2['adult']
#make a second df with imputed mean values for nonsensical data in quantitative variables
feat3 = feat2.iloc[:,0:9].values
feat3 = feat3.astype('float')
feat3[feat3 == 0] = np.nan
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
feat3 = imp.fit_transform(feat3)
feat3 = pd.DataFrame(feat3, columns = list(feat2)[0:9])
feat3 = pd.concat([feat3, feat2.iloc[:,10:520]], axis=1).dropna()
feat3.head()
# Pop labels off of imputed dataset
labels3 = feat3.ix[:,512:519]
feat3 = feat3.drop(feat3.columns[[512, 513, 514, 515, 516, 517, 518]], axis=1)
feat2 = feat2.drop(feat2.columns[[513, 514, 515, 516, 517, 518, 519]], axis=1)
#split our data into training and testing
X_train, X_test, y_train, y_test = train_test_split(feat2, labels2, test_size=0.33, random_state=42)
# ----------------------------
# Standardize features
scaler = preprocessing.StandardScaler().fit(X_train)
# Call: scaler.transform(X_train) to scale
# ----------------------------
from sklearn.naive_bayes import GaussianNB
# Do multi-label NB
nb_mdl = OneVsRestClassifier(GaussianNB(), n_jobs=-1)
nb_mdl.fit(X_train, y_train)
# Test uncalibrated NB performance
nb_pred = nb_mdl.predict(X_test)
hamming_loss(y_test, nb_pred)
from sklearn.calibration import CalibratedClassifierCV
nb_sigmoid = CalibratedClassifierCV(GaussianNB(), method='sigmoid')
nb_mdl = OneVsRestClassifier(nb_sigmoid, n_jobs=-1)
nb_mdl.fit(X_train, y_train)
nb_cal_proba = nb_mdl.predict_proba(X_test)
loss = []
# Choose optimal threshold by manual ROC
for thresh in np.linspace(0.01, 0.99, 99):
pred = np.array(nb_cal_proba > thresh).astype(int)
loss.append(hamming_loss(y_test, pred))
plt.plot(np.linspace(0.01, 0.99, 99), loss)
plt.xlabel('threshold'); plt.ylabel('Hamming Loss')
print 'Minumum Hamming loss: {loss}'.format(loss=min(loss))
print 'Loss when zero: {loss}'.format(loss=hamming_loss(y_test, np.zeros(np.shape(y_test))))
from sklearn.linear_model import LogisticRegression
# Now do LR - we'll use the SGD solver since the dataset is so large
lr_mdl = OneVsRestClassifier(LogisticRegression(class_weight='balanced', C=100, solver='sag'), n_jobs=-1)
lr_mdl.fit(scaler.transform(X_train), y_train)
# Test LR performance
lr_pred = lr_mdl.predict(scaler.transform(X_test))
hamming_loss(y_test, lr_pred)
# Let's look at the first handful of genre predictions
lr_pred[:10]
# implementation of LR using PCA to see if this results in better prediction
# start with 400 PCs, will only take the ones that explain ~90% of variance
pca = PCA(n_components=400)
pca.fit(scaler.transform(X_train))
# show the number of PCs that explain at least 90% of the variance
var90pcs = len(pca.explained_variance_ratio_[np.cumsum(pca.explained_variance_ratio_)<.9])
print var90pcs
print np.cumsum(pca.explained_variance_ratio_)[var90pcs]
# reduce data using PCA
X_train_reduced = pca.transform(scaler.transform(X_train))
print X_train_reduced.shape
X_test_reduced = pca.transform(scaler.transform(X_test))
print X_test_reduced.shape
# Now the same LR as above but with 327 principal components
lr_mdl_pca = OneVsRestClassifier(LogisticRegression(class_weight='balanced', C=100, solver='sag'), n_jobs=-1)
lr_mdl_pca.fit(X_train_reduced[:,0:var90pcs], y_train)
# Test LR performance
lr_pred_pca = lr_mdl_pca.predict(X_test_reduced[:,0:var90pcs])
hamming_loss(y_test, lr_pred_pca)
#run a tuned RFC on full data set and calculate Hamming loss
# **tuning parameters chosen previously using code in the Appendix
rfc_mdl = RFC(n_estimators=120, max_depth=60, class_weight ='balanced', n_jobs=-1).fit(X_train,y_train)
rf_pred = rfc_mdl.predict(X_test)
hamming_loss(y_test, rf_pred)
#calculate the overall accuracy of our predictions
rfc_mdl.score(X_test, y_test)
#run a tuned RFC on our data that has been reduced using PCA and calculate Hamming loss
rfc_mdl_pca = RFC(n_estimators=120, max_depth=60, class_weight = 'balanced').fit(X_train_reduced[:,0:var90pcs],y_train)
pred_pca = rfc_mdl_pca.predict(np.array(X_test_reduced[:,0:var90pcs]))
hamming_loss(y_test, pred_pca)
#visualize by-genre accuracy of predictions
def plot_acc_by_genre(pred, y_test, clf_str='',
                      genre_names=('Action', 'Drama', 'Comedy', 'Family',
                                   'Romance', 'Documentary', 'Horror')):
    """Bar-plot per-genre prediction accuracy (% of labels matched).

    pred        : array-like of shape (n_samples, n_genres) of 0/1 predictions
    y_test      : DataFrame of true 0/1 labels, columns in the same genre order
    clf_str     : classifier name used in the plot title
    genre_names : tick labels; also sets how many columns are scored
                  (previously hard-coded to these 7 genres)
    """
    preds = pd.DataFrame(pred)
    n_genres = len(genre_names)
    acc = []
    for i in range(n_genres):
        # Fix: select columns positionally with .iloc -- y_test[[i]] with an
        # integer i does label-based selection and breaks when y_test carries
        # genre-name columns (as built earlier in this notebook).
        a = np.asarray(y_test.iloc[:, i])
        b = np.asarray(preds.iloc[:, i])
        acc.append(np.count_nonzero(a == b) / preds.shape[0] * 100)
    plt.figure(figsize=(10, 5))
    index = range(n_genres)
    plt.bar(index, acc, align="center")
    plt.xticks(index, genre_names)
    plt.ylim([0, 100])
    plt.grid(axis='y')
    plt.title(clf_str + ' Accuracy by Genre')
    plt.ylabel('Accuracy')
    plt.show()
plot_acc_by_genre(lr_pred, y_test, 'Logistic Regression')
plot_acc_by_genre(rf_pred, y_test, 'Random Forest')
#split our imputed data df into training and testing
X_train3, X_test3, y_train3, y_test3 = train_test_split(feat3, labels3, test_size=0.33, random_state=42)
#run a RFC on our data with imputed data
imputedmdl = RFC(n_estimators=120, max_depth=60, class_weight = 'balanced', n_jobs=-1).fit(X_train3,y_train3)
pred3 = imputedmdl.predict(X_test3)
hamming_loss(y_test3, pred3)
#build several dfs with different predictors
a = X_train.ix[:,0:9] #just the basics
b = X_train.ix[:,0:132] #include original language
c = X_train.ix[:,0:365] #include production countries
d = X_train #include spoken language
e = X_test.ix[:,0:9] #just the basics
f = X_test.ix[:,0:132] #include original language
g = X_test.ix[:,0:365] #include production countries
h = X_test #include spoken language
#run models on each df
dfs_train = [a,b,c,d]
dfs_test = [e,f,g,h]
loss = []
for i in range(4):
mdl = RFC(n_estimators=120, max_depth=60, class_weight = 'balanced', n_jobs=-1).fit(dfs_train[i],y_train)
pred = mdl.predict(dfs_test[i])
loss.append(hamming_loss(y_test, pred))
print loss
plt.figure(figsize=(10,5))
index = [0,1,2,3]
plt.bar(index,loss,align="center")
plt.xticks(index, ('Basics', '+ Original language', '+ Production Countries', '+ Spoken Language'))
plt.ylim([0,0.2])
plt.grid(axis='y')
plt.title('Random Forest with Different Attributes of Films')
plt.ylabel('Hamming loss')
plt.show()
importances = rfc_mdl.feature_importances_
std = np.std([tree.feature_importances_ for tree in rfc_mdl.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X_train.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X_train.shape[1]), indices)
plt.xlim([-1, 15])
plt.show()
# Print the full feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Tuning for LR model: sweep the inverse-regularization strength C.
# Fix: the original grid had a missing comma (`10.0 100`) -- a SyntaxError --
# and the plotted x-axis duplicated the grid by hand; use one shared list.
C_grid = [0.001, 0.01, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 100]
loss = []
for tuning_param in C_grid:
    lr_mdl = OneVsRestClassifier(LogisticRegression(class_weight='balanced', C=tuning_param, solver='sag'), n_jobs=-1)
    lr_mdl.fit(scaler.transform(X_train), y_train)
    # Test LR performance on the held-out set
    lr_pred = lr_mdl.predict(scaler.transform(X_test))
    loss.append(hamming_loss(y_test, lr_pred))
plt.plot(C_grid, loss)
plt.xlabel('regularization tuning parameter'); plt.ylabel('Hamming loss')
# Tuning for random forest model
n_folds = 5
kf = KFold(n_splits=n_folds)
# Parameters for tuning a random forest model
n_trees = np.arange(10, 100, 20) # Trees and depth are explored on an exponentially growing space,
depths = np.arange(2, 10) # since it is assumed that trees and depth will add accuracy in a decaying fashion.
# To keep track of the best model
best_score = 1
# Run grid search for model with 5-fold cross validation
print '5-fold cross validation:'
for trees in n_trees:
for depth in depths:
loss = []
for ktrain, ktest in kf.split(feat2):
mdl = RFC(n_estimators=trees, max_depth=depth).fit(feat2.iloc[ktrain,:],labels2.iloc[ktrain,:])
pred = mdl.predict(feat2.iloc[ktest,:])
loss.append(hamming_loss(labels2.iloc[ktest,:], pred))
# Record and report probability
average_loss = np.mean(loss)
# Record and report accuracy
print "Trees:", trees, "Depth:", depth, "Loss:", average_loss
# Update our record of the best parameters seen so far
if average_loss < best_score:
best_score = average_loss
best_trees = trees
best_depth = depth
print 'Best number of trees, depth:', best_trees, ',', best_depth
# Go higher in trees and depth
# Parameters for tuning a random forest model
depths = np.arange(50, 80, 10)
# To keep track of the best model
best_score = 1
kf = KFold(n_splits=3)
# Run grid search for model with 5-fold cross validation
print '3-fold cross validation:'
for trees in n_trees:
for depth in depths:
loss = []
for ktrain, ktest in kf.split(feat2):
mdl = RFC(n_estimators=160, max_depth=depth, class_weight = 'balanced').fit(feat2.iloc[ktrain,:],labels2.iloc[ktrain,:])
pred = mdl.predict(feat2.iloc[ktest,:])
loss.append(hamming_loss(labels2.iloc[ktest,:], pred))
# Record and report probability
average_loss = np.mean(loss)
# Record and report accuracy
print "Trees:", trees, "Depth:", depth, "Loss:", average_loss
# Update our record of the best parameters seen so far
if average_loss < best_score:
best_score = average_loss
best_trees = trees
best_depth = depth
print 'Best number of trees, depth:', best_trees, ',', best_depth
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's begin by loading and examining our raw dataset, containing data obtained through the TMDB API and saved previously as a CSV file.
Step2: Next we'll load our previously dummy-coded genre labels and verify they look as expected.
Step3: Now, we begin preparing the data for model fitting. The first step in this process is to remove or reformat features that are not suitable, i.e. numeric.
Step4: There are a decent number of movies that are missing data for some of the features. Since we can't fit our models with NaN values present, we can choose to either remove these movies, or impute values for the missing features. Let's see how many movies we lose by dropping observations with any missing values.
Step5: So we'll lose almost 12K movies by dropping those with any missing values. Given the large size of our datset, this doesn't seem unmanageable.
Step6: We also need to worry about features that have erroneous or nonsensical values, e.g. a budget of $0.00. These erroneous values could have a negative influence on the model, if they're not dealt with. So we'll create a new dataset with erroneous values replaced by the mean, with which we'll compare the non-altered dataset later.
Step7: Model fitting and comparison
Step8: General strategy
Step9: We can see that the NB model appears to do a decent job right out of the box, with a Hamming loss of 0.31. This means that roughly 69% of our genre labels are predicted correctly for the test set. Since each genre assignment is a 50/50 choice, this would seem to be a large improvement over a random classifier.
Step10: Surprisingly, it seems like the NB classifier does best (Hamming loss = 0.213) at the higest thresholds - when it predicts everything as 0, i.e. no genre assigned. In fact, it turns out that the best performance achievable here is what we would achieve by assigning no genre to any movie - the "all null" model (Hamming loss = 0.215). This is clearly not the desired effect, since prediction of no genre is completely unhelpful. We could try to impose a constraint on the model that at least one genre is always chosen, e.g. applying a softmax transformation, but this assumes labels are mutually exclusive, so it can't handle multi-label scenarios like this one. To circumvent these issues, we decided to try something a bit different, and turned to logistic regression.
Step11: We can see that our untuned LR does worse than the NB model and how we would do by simply predicting all zeros for the genres (Hamming loss of 0.350 vs. 0.215), but at least it does assign movies at least one genre. Presumably, performance would improve with some finer tuning of the regularization strength, but it seems doubtful that we will get too far with this model and this dataset.
Step12: We can see that 90% of the variance is explained by the first 327 PCs, so we will fit our LR using the first 327 PCs and compare the results to the model using the entire dataset above.
Step13: We can see that the Hamming loss is pretty similar to that achieved using the full data set, only slightly worse. This indicates that the features aren't very compressible, i.e. information is distributed widely across features. It is possible that we will will see gains by implementing a more flexible classifier, so we decided to fit an RFC, and compare results with those above.
Step14: We can see that we achieve a significantly better Hamming score with RFC (0.17) than with NB or LR. This means that roughly 83% of our genre labels are predicted correctly for the test set.
Step15: We also compared the Hamming score metric with a more traditional overall accuracy metric. From the sklearn documentation
Step16: About 82% of the genres are predicted correctly using PCA with 327 PCs. This is pretty comparable to the RFC above that uses the full dimension dataset. Perhaps the slight loss in accuracy is offset by the reduction in time it takes to fit the random forest, although this is a subjective judgment.
Step17: For the LR, accuracy is pretty similar across action, drama, comedy, and horror, while family and documentary genres are predicted with slightly higher fidelity.
Step18: This means that roughly 80% of our genre labels are predicted correctly for the test set when using the dataset with imputed data. As this performance is worse than leaving erroneous value in place, we will continue to test other methods of handling missing / nonsensical data.
Step19: As we can see, ading all our language and country predictors as indicator variables leads to a 1% improvement in classifier performance. This is not a huge gain, but perhaps worth the added overhead.
Step20: As we can see from the above visualization, seven features proved the most important in determining the movie genres for our Random Forest Classifier. The top nine contributors came from our "basic" core of quanitiative data scraped from TMDb and are, in order
Step21: Appendix
|
7,834
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import sys, os
sys.path.insert(0, os.path.expanduser('~/work/git/github/taku-y/pymc3'))
import theano
theano.config.floatX = 'float64'
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import pandas as pd
data = pd.read_csv('../data/radon.csv')
county_names = data.county.unique()
county_idx = data['county_code'].values
n_counties = len(data.county.unique())
with pm.Model() as hierarchical_model:
# Hyperpriors for group nodes
mu_a = pm.Normal('mu_alpha', mu=0., sd=100**2)
sigma_a = pm.Uniform('sigma_alpha', lower=0, upper=100)
mu_b = pm.Normal('mu_beta', mu=0., sd=100**2)
sigma_b = pm.Uniform('sigma_beta', lower=0, upper=100)
# Intercept for each county, distributed around group mean mu_a
# Above we just set mu and sd to a fixed value while here we
# plug in a common group distribution for all a and b (which are
# vectors of length n_counties).
a = pm.Normal('alpha', mu=mu_a, sd=sigma_a, shape=n_counties)
# Intercept for each county, distributed around group mean mu_a
b = pm.Normal('beta', mu=mu_b, sd=sigma_b, shape=n_counties)
# Model error
eps = pm.Uniform('eps', lower=0, upper=100)
# Model prediction of radon level
# a[county_idx] translates to a[0, 0, 0, 1, 1, ...],
# we thus link multiple household measures of a county
# to its coefficients.
radon_est = a[county_idx] + b[county_idx] * data.floor.values
# Data likelihood
radon_like = pm.Normal('radon_like', mu=radon_est, sd=eps, observed=data.log_radon)
with hierarchical_model:
means, sds, elbos = pm.variational.advi(n=100000)
# Inference button (TM)!
with hierarchical_model:
#start = pm.find_MAP()
step = pm.NUTS(scaling=means)
hierarchical_trace = pm.sample(5000, step, start=means, progressbar=False)
from scipy import stats
import seaborn as sns
varnames = means.keys()
fig, axs = plt.subplots(nrows=len(varnames), figsize=(12, 18))
for var, ax in zip(varnames, axs):
mu_arr = means[var]
sigma_arr = sds[var]
ax.set_title(var)
for i, (mu, sigma) in enumerate(zip(mu_arr.flatten(), sigma_arr.flatten())):
sd3 = (-4*sigma + mu, 4*sigma + mu)
x = np.linspace(sd3[0], sd3[1], 300)
y = stats.norm(mu, sigma).pdf(x)
ax.plot(x, y)
if hierarchical_trace[var].ndim > 1:
t = hierarchical_trace[var][i]
else:
t = hierarchical_trace[var]
sns.distplot(t, kde=False, norm_hist=True, ax=ax)
fig.tight_layout()
pm.traceplot(hierarchical_trace[500:]);
# Non-hierarchical model runs
selection = ['CASS', 'CROW WING', 'FREEBORN']
indiv_traces = {}
for county_name in selection:
# Select subset of data belonging to county
c_data = data.ix[data.county == county_name]
c_log_radon = c_data.log_radon
c_floor_measure = c_data.floor.values
with pm.Model() as individual_model:
# Intercept prior (variance == sd**2)
a = pm.Normal('alpha', mu=0, sd=100**2)
# Slope prior
b = pm.Normal('beta', mu=0, sd=100**2)
# Model error prior
eps = pm.Uniform('eps', lower=0, upper=100)
# Linear model
radon_est = a + b * c_floor_measure
# Data likelihood
radon_like = pm.Normal('radon_like', mu=radon_est, sd=eps, observed=c_log_radon)
# Inference button (TM)!
trace = pm.sample(2000, progressbar=False)
# keep trace for later analysis
indiv_traces[county_name] = trace
fig, axis = plt.subplots(1, 3, figsize=(12, 6), sharey=True, sharex=True)
axis = axis.ravel()
for i, c in enumerate(selection):
c_data = data.ix[data.county == c]
c_ind = np.where(county_names==c)[0][0]
c_data = c_data.reset_index(drop = True)
z = list(c_data['county_code'])[0]
xvals = np.linspace(-0.2, 1.2)
for a_val, b_val in zip(indiv_traces[c]['alpha'][500:], indiv_traces[c]['beta'][500:]):
axis[i].plot(xvals, a_val + b_val * xvals, 'b', alpha=.1)
axis[i].plot(xvals, indiv_traces[c]['alpha'][500:].mean() + indiv_traces[c]['beta'][500:].mean() * xvals,
'b', alpha=1, lw=2., label='individual')
for a_val, b_val in zip(hierarchical_trace['alpha'][500:][z], hierarchical_trace['beta'][500:][z]):
axis[i].plot(xvals, a_val + b_val * xvals, 'g', alpha=.1)
axis[i].plot(xvals, hierarchical_trace['alpha'][500:][z].mean() + hierarchical_trace['beta'][500:][z].mean() * xvals,
'g', alpha=1, lw=2., label='hierarchical')
axis[i].scatter(c_data.floor + np.random.randn(len(c_data))*0.01, c_data.log_radon,
alpha=1, color='k', marker='.', s=80, label='original data')
axis[i].set_xticks([0,1])
axis[i].set_xticklabels(['basement', 'no basement'])
axis[i].set_ylim(-1, 4)
axis[i].set_title(c)
if not i%3:
axis[i].legend()
axis[i].set_ylabel('log radon level')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting the hierarchical model trace -its found values- from 500 iterations onwards (right side plot) and its accumulated marginal values (left side plot)
Step2: The marginal posteriors in the left column are highly informative. mu_a tells us the group mean (log) radon levels. mu_b tells us that having no basement decreases radon levels significantly (no mass above zero). We can also see by looking at the marginals for a that there is quite some differences in radon levels between counties (each 'rainbow' color corresponds to a single county); the different widths are related to how much confidence we have in each paramter estimate -- the more measurements per county, the higher our confidence will be.
|
7,835
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'gfdl-esm4', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
DOC.set_value("Other: troposphere")
DOC.set_value("mesosphere")
DOC.set_value("stratosphere")
DOC.set_value("whole atmosphere")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Lumped higher hydrocarbon species and oxidation products, parameterized source of Cly and Bry in stratosphere, short-lived species not advected")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(82)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
DOC.set_value("Operator splitting")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(30)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(30)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
DOC.set_value("Anthropogenic")
DOC.set_value("Other: bare ground")
DOC.set_value("Sea surface")
DOC.set_value("Vegetation")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("CO, CH2O, NO, C3H6, isoprene, C2H6, C2H4, C4H10, terpenes, C3H8, acetone, CH3OH, C2H5OH, H2, SO2, NH3")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("DMS")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
DOC.set_value("Aircraft")
DOC.set_value("Biomass burning")
DOC.set_value("Lightning")
DOC.set_value("Other: volcanoes")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("CO, CH2O, NO, C3H6, isoprene, C2H6, C2H4, C4H10, terpenes, C3H8, acetone, CH3OH, C2H5OH, H2, SO2, NH3")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("CH4, N2O")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
DOC.set_value("Bry")
DOC.set_value("Cly")
DOC.set_value("H2O")
DOC.set_value("HOx")
DOC.set_value("NOy")
DOC.set_value("Other: sox")
DOC.set_value("Ox")
DOC.set_value("VOCs")
DOC.set_value("isoprene")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(157)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(21)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(19)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
DOC.set_value("Bry")
DOC.set_value("Cly")
DOC.set_value("NOy")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
DOC.set_value("NAT (Nitric acid trihydrate)")
DOC.set_value("Polar stratospheric ice")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(3)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("3")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
DOC.set_value("Sulphate")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(39)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
DOC.set_value("Offline (with clouds)")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
7,836
|
<ASSISTANT_TASK:>
Python Code:
boys = int(input('How many boys are in the class: '))
girls = int(input('How many girls are in the class:'))
pupils = boys + girls
print('There are', pupils,'in the class altogether')
bigger_number = 12
smaller_number = 10
difference = bigger_number - smaller_number
print('The difference between', bigger_number, 'and', smaller_number, 'is', difference)
number1 = 5
number2 = 6
answer = number1 * number2
print(answer)
big_number = 100
divisor_number = 25
dividend_answer = 100/25
print(big_number,'divided by', divisor_number, 'is', dividend_answer)
big_number = 102
divisor_number = 25
remainder = 100%25
print('If you divide', big_number,'by', divisor_number, 'you get', remainder, 'left over')
phrase1 = 'The quick brown fox jumped over'
phrase2 = 'the moon'
sentence = phrase1+phrase2
print(sentence)
noun1 = 'turnip'
noun2 = 'elephant'
noun3 = 'worm'
noun4 = 'holiday'
noun5 = 'Scalloway'
verb1 = 'went'
verb2 = 'ate'
verb3 = 'sat'
verb4 = 'jumped'
preposition1 = 'on'
preposition2 = 'to'
preposition3 = 'with'
def_article = 'the'
indef_article = 'a'
example1 = def_article+' '+noun1+' '+verb1+' '+preposition1+' '+noun4+' '+preposition2+' '+noun5
example2 = 'change this line'
print(example1)
print(example2)
noun1 = 'turnip'
noun2 = 'elephant'
noun3 = 'worm'
noun4 = 'holiday'
noun5 = 'Scalloway'
verb1 = 'went'
verb2 = 'ate'
verb3 = 'sat'
verb4 = 'jumped'
preposition1 = 'on'
preposition2 = 'to'
preposition3 = 'with'
def_article = 'the'
indef_article = 'a'
print(def_article,noun1,verb1,preposition1,noun4,preposition2,noun5)
print('replace this with your own choice of variables')
noun1 = 'turnip'
noun2 = 'elephant'
noun3 = 'worm'
noun4 = 'holiday'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Subtraction
Step2: Multiplication
Step3: Division
Step4: Extra Useful Bit
Step5: Working with Strings - aka Text
Step6: Change line 3 of the above program to put in a space
Step7: Getting Rid of all the ' ' Space Bits
Step8: How Long is a Piece of String
|
7,837
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'messy-consortium', 'sandbox-1', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
7,838
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import urllib2
from __future__ import division
plt.style.use('ggplot')
np.random.seed(1)
url = ('https://raw.githubusercontent.com/Upward-Spiral-Science'
'/data/master/syn-density/output.csv')
data = urllib2.urlopen(url)
csv = np.genfromtxt(data, delimiter=",",dtype='int')[1:] # don't want first row (labels)
data = csv
print data[::1000]
sizes = [len(np.unique(data[:, i])) for i in range(3)]
ranges = [(np.max(data[:, i]), np.min(data[:,i])) for i in range(3)]
ranges_diff = [np.max(data[:, i])-np.min(data[:,i]) for i in range(3)]
print np.max(data[:,3])
print sizes
print ranges
print ranges_diff
for i, ax in zip(range(3), ['x', 'y', 'z']):
print ax + '-axis: '
print 'unique bins (in data): ', np.unique(data[:, i]).size
print np.unique(data[:, i])
print
xPix = 135424
yPix = 119808
xPixPerBin = xPix/108.0
yPixPerBin = yPix/86.0
print xPixPerBin, yPixPerBin
# Now since each bin is 40 data coordinates we can define the following function to convert from coordinates to pixels.
def coords_to_px(xcoord, ycoord):
    """Map data-file (x, y) coordinates onto image pixel coordinates.

    Each bin spans 39 data units, so coordinates are scaled down by 39
    (bin index) and back up by the pixel width/height of one bin.
    Relies on module-level ``xPixPerBin`` / ``yPixPerBin``.
    """
    scaled = np.array([xcoord, ycoord], dtype='float')
    scaled = scaled / 39.0
    return (scaled[0] * xPixPerBin, scaled[1] * yPixPerBin)
# check that max coordinate values are close to max pixels
print coords_to_px(4192, 3358)
# how big is a bin? (just a sanity check, should obviously
# be identical to xPixPerBin and yPixPerBin)
print coords_to_px(39.0, 39.0)
def get_tilenums_at_res(xcoord, ycoord, res):
    """Return the (x, y) tile indices covering the given data coordinates.

    Tiles are 512 * 2**res pixels wide at resolution level ``res``;
    indices are floored (returned as floats, as np.floor produces).
    """
    px, py = coords_to_px(xcoord, ycoord)
    tile_size = 512 * (2 ** res)
    return np.floor(px / tile_size), np.floor(py / tile_size)
def get_image_url1(xcoord, ycoord, res, z):
    """Build the OpenConnectome CATMAID tile URL for the given position.

    URL path layout is ``.../xy/<z>/<tile_y>_<tile_x>_<res>.png``.
    """
    tile_x, tile_y = get_tilenums_at_res(xcoord, ycoord, res)
    tile_x = int(tile_x)
    tile_y = int(tile_y)
    suffix = '/' + '_'.join(str(v) for v in (tile_y, tile_x, res)) + '.png'
    return 'http://openconnecto.me/ocp/catmaid/bock11/image/xy/' + str(z) + suffix
print get_image_url1(2000, 2000, 0, 2917)
print (2917-4156)*-1
# NOTE: you can copy just this block into your notebook to use the get_image_url() function
xPix = 135424
yPix = 119808
xPixPerBin = xPix/108.0
yPixPerBin = yPix/86.0
max_tiles_x = 264 # found via inspection of viz html/JS code
max_tiles_y = 233 # found via inspection of viz html/JS code
def coords_to_px(xcoord, ycoord):
    """Convert raw data-file coordinates to pixel coordinates.

    One bin is 39 data units wide; ``xPixPerBin`` / ``yPixPerBin`` give
    the pixel extent of a bin along each axis (module-level globals).
    """
    coords = np.array([xcoord, ycoord], dtype='float')
    bin_pos = coords / 39.0
    return (bin_pos[0] * xPixPerBin, bin_pos[1] * yPixPerBin)
def get_tilenums_at_res(xcoord, ycoord, res):
    """Return the (x, y) tile indices for the given data coordinates.

    Tiles are 512 * 2**res pixels wide at resolution level ``res``;
    indices are rounded (returned as floats, as np.round produces).

    Note: the original code special-cased ``res == 0``, but since
    512 * 2**0 == 512 both branches computed the identical value, so
    the redundant branch has been collapsed.
    """
    px, py = coords_to_px(xcoord, ycoord)
    tile_size = 512 * (2 ** res)
    return np.round(px / tile_size), np.round(py / tile_size)
def get_image_url(xcoord, ycoord, zcoord, res=1):
    """Build the tile-image URL for a bin in the original data file.

    params:
    - xcoord, ycoord, zcoord: coordinates as given in the original data file
    - res: image resolution level, default 1
      (since 1024x1024 pixels is approximately the size of one bin)

    returns: (string) url of image
    """
    # NOTE: the docstring above was present as bare (unquoted) text in the
    # original, which is a syntax error; restored as a proper docstring.
    zcoord += 2917  # data z-values are offset from the image z-slice numbering
    z = int(zcoord)
    tile_x, tile_y = get_tilenums_at_res(int(xcoord), int(ycoord), res)
    tile_x = int(tile_x)
    tile_y = int(tile_y)
    # Clamp to the viewer's maximum tile indices at this resolution.
    if tile_x > max_tiles_x // (2 ** res):
        tile_x = max_tiles_x // (2 ** res)
    if tile_y > max_tiles_y // (2 ** res):
        tile_y = max_tiles_y // (2 ** res)
    suffix = '/' + '_'.join(str(v) for v in (tile_y, tile_x, res)) + '.png'
    return 'http://openconnecto.me/ocp/catmaid/bock11/image/xy/' + str(z) + suffix
from IPython.display import Image, HTML, display
disp_dim = {'width': 200, 'height': 200} # just for quickly setting image width/height
m = np.max(data[:, -1])
a = np.where(data[:, -1]==m)
args = list(*data[a, (0, 1, 2)])+[0]
imgs = []
for r in range(3):
args[-1] = r
u = get_image_url(*args)
print u
imgs.append(Image(url=u, **disp_dim))
display(*imgs)
dens_data = np.copy(data).astype(float)
dens_data = dens_data[np.where(dens_data[:,3] != 0)]
dens_data[:, 3] = dens_data[:, 4]/dens_data[:, 3]
dens_data = dens_data[:,:-1]
print np.average(dens_data[:,-1])
a = np.argsort(dens_data[:, -1])
urlsMin, urlsMax = zip(*[(get_image_url(*dens_data[a[i],:-1]),
get_image_url(*dens_data[a[-1-i],:-1]))
for i in range(9)])
tagsMin = ''.join(["<img style='width: 80px; margin: 0px; padding-right: 3px; float: left;' src='%s' />" % str(u)
for u in urlsMin ])
tagsMin += '<br> <br>'
tagsMax = ''.join(["<img style='width: 80px; margin: 0px; padding-right: 3px; float: left;' src='%s' />" % str(u)
for u in urlsMax ])
display(HTML(tagsMin))
display(HTML(tagsMax))
# get the clean data
x_bounds = (409, 3529)
y_bounds = (1564, 3124)
def check_in_bounds(row, x_bounds, y_bounds):
    """Return True iff the row's (x, y) lies inside the given inclusive
    bounds and its unmasked count (column 3) is non-zero."""
    in_x = x_bounds[0] <= row[0] <= x_bounds[1]
    in_y = y_bounds[0] <= row[1] <= y_bounds[1]
    return bool(in_x and in_y and row[3] != 0)
indices_in_bound, = np.where(np.apply_along_axis(check_in_bounds, 1, csv, x_bounds, y_bounds))
data_clean = csv[indices_in_bound]
dens_data = np.copy(data_clean).astype(float)
dens_data = dens_data[np.where(dens_data[:,3] != 0)]
dens_data[:, 3] = dens_data[:, 4]/dens_data[:, 3]
dens_data = dens_data[:,:-1]
print np.average(dens_data[:,-1])
a = np.argsort(dens_data[:, -1])
urlsMin, urlsMax = zip(*[(get_image_url(*dens_data[a[i],:-1]),
get_image_url(*dens_data[a[-1-i],:-1]))
for i in range(9)])
tagsMin = ''.join(["<img style='width: 80px; margin: 0px; padding-right: 3px; float: left;' src='%s' />" % str(u)
for u in urlsMin ])
tagsMin += '<br> <br>'
tagsMax = ''.join(["<img style='width: 80px; margin: 0px; padding-right: 3px; float: left;' src='%s' />" % str(u)
for u in urlsMax ])
display(HTML(tagsMin))
display(HTML(tagsMax))
from itertools import count
avg_unmasked = np.average(data[:,3])
high_unmasked = data[np.where(data[:,3] > avg_unmasked)]
low_synapses = []
for s in count():
low_synapses = high_unmasked[np.where(high_unmasked[:,-1]==s)]
if low_synapses.size > 0:
print s
break
d_low = low_synapses[0]
print d_low
imgs = []
imgs.append(Image(url=get_image_url(*d_low[:3]), **disp_dim))
max_s = np.max(data[:, 4])
print max_s
high_synapses = []
for s in range(max_s, 0, -1):
high_synapses = high_unmasked[np.where(high_unmasked[:,-1]==s)]
if high_unmasked.size > 0:
print s
break
d_high = high_synapses[0]
print d_high
imgs.append(Image(url=get_image_url(*d_high[:3]), **disp_dim))
display(*imgs)
# zoom in a resolution
display(Image(url=get_image_url(*d_low[:3], res=0), **disp_dim),
Image(url=get_image_url(*d_high[:3], res=0), **disp_dim))
# fairly arbitrarily, look at midpoint for x and z
midx, midz = [np.median(np.unique(data[:, i])) for i in [0,2]]
y = np.min(data[:, 1])
print midx, y, midz
Image(url=get_image_url(midx, y, midz, 2), **disp_dim)
# nothing apparently notable, lets view across entire x-axis
from itertools import groupby
urls = [k for k,_ in groupby(
[get_image_url(x, y, midz, 3)
for x in np.sort(np.unique(data[:, 0]))])]
imgTags = ''.join( ["<img style='width: 20px; margin: 0px; padding-bottom: 3px; float: left;' src='%s' />" % str(u)
for u in urls ])
display(HTML(imgTags))
# y value below cutoff
urls = [k for k,_ in groupby(
[get_image_url(x, 39*10, midz, 3)
for x in np.sort(np.unique(data[:, 0]))])]
imgTags = ''.join( ["<img style='width: 20px; margin: 0px; padding-bottom: 3px; float: left;' src='%s' />" % str(u)
for u in urls ])
display(HTML(imgTags))
# what bins along this y value have unmasked = 0?
for row in data[np.where(data[:, 1] == y)]:
if row[3] == 0: print row
print np.average(data[np.where(data[:, 1] == y+39*2), 3])
# do same thing for z = 1165, since we observe this is where all the unmasked = 0 bins occur
urls = [k for k,_ in groupby(
[get_image_url(x, y, 1165, 3)
for x in np.sort(np.unique(data[:, 0]))])]
imgTags = ''.join( ["<img style='width: 20px; margin: 0px; padding-bottom: 3px; float: left;' src='%s' />" % str(u)
for u in urls ])
display(HTML(imgTags))
a, = np.where(data[:, 3] == 0)
# first unmasked = 0
args = list(data[a[0], (0, 1, 2)])
u = get_image_url(*args)
print u
Image(url=u, **disp_dim)
# middle one
args = list(data[a[a.size//2], (0, 1, 2)])
u = get_image_url(*args)
print u
Image(url=u, **disp_dim)
# zoom out middle
args += [7]
u = get_image_url(*args)
print u
Image(url=u, **disp_dim)
# last
args = list(data[a[-1], (0, 1, 2)])
u = get_image_url(*args)
print u
Image(url=u, **disp_dim)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using image data
Step2: Grabbing images from website
Step3: Now just need to figure out z-axis. The z values in the image data go from 2917-4156, which is a range of
Step5: So it seems that the z-values in the data correspond approximately to the z-values in the image data, other than a translation of 2917. So let's redefine our function, and put it in one code block so its easy for other people to use...
Step6: Testing the image scraper w/ some exploratory questions
Step7: The above should be images of the bin where maximal number of synapses occured. Note changes in resolution let us zoom in and out.
Step8: Black regions most likely are masked regions, thus it is actually not surprising too see large amounts of masked for both high and low density areas (since low unmasked increases density, but at the same time lowers synaptic probability). Furthermore, note that the data is binned across many z-slices, while here, we are only looking at one z-slice at a time, thus it is plausible for a high density bin to have an entire slice masked. This also indicates that it would be beneficial to write a function that computes pixel-wise average across z-slices for a bin and returns the corresponding image. We can also only look at the more 'cleaned' data, as many boundary points are likely to be picked up here.
Step9: How about regions with high unmasked, and low synapses, and high unmasked with high synapses?
Step10: A significant number of the bins were cut off below a given threshold on the y-axis before the data was given to us... What does that line look like?
Step11: First row shows where the data was sliced, second is somewhere before it was sliced (that is, data not included in the set)... Since these black regions probably correspond to regions that are heavily masked, perhaps this is why data split here?
Step12: Let's confirm that black regions do infact correspond to low unmasked.
Step13: Looks like regions where unmasked = 0 are corresponding to edges..
|
7,839
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('..')
import pandexo.engine.justdoit as jdi
exo_dict = jdi.load_exo_dict()
#WASP-43
exo_dict['star']['jmag'] = 9.995 # J magnitude of the system
exo_dict['star']['hmag'] = 9.397 # H magnitude of the system
#WASP-43b
exo_dict['planet']['type'] = 'user' # user specified inputs
exo_dict['planet']['exopath'] = jdi.os.getcwd()+'/WASP43b-Eclipse_Spectrum.txt' # filename for model spectrum
exo_dict['planet']['w_unit'] = 'um' # wavelength unit
exo_dict['planet']['f_unit'] = 'fp/f*' # flux ratio unit (can also put "rp^2/r*^2")
exo_dict['planet']['depth'] = 4.0e-3 # flux ratio
exo_dict['planet']['i'] = 82.6 # Orbital inclination in degrees
exo_dict['planet']['ars'] = 5.13 # Semi-major axis / stellar radius
exo_dict['planet']['period'] = 0.8135 # Orbital period in days
exo_dict['planet']['transit_duration']= 4170.0/60/60/24#(optional if given above info) transit duration in days
exo_dict['planet']['w'] = 90 #(optional) longitude of periastron. Default is 90
exo_dict['planet']['ecc'] = 0 #(optional) eccentricity. Default is 0
inst_dict = jdi.load_mode_dict('WFC3 G141')
exo_dict['observation']['noccultations'] = 5 # Number of transits/eclipses
inst_dict['configuration']['detector']['subarray'] = 'GRISM256' # GRISM256 or GRISM512
inst_dict['configuration']['detector']['nsamp'] = 10 # WFC3 N_SAMP, 1..15
inst_dict['configuration']['detector']['samp_seq'] = 'SPARS10' # WFC3 SAMP_SEQ, SPARS5 or SPARS10
inst_dict['strategy']['norbits'] = 3 # Number of HST orbits
inst_dict['strategy']['nchan'] = 15 # Number of spectrophotometric channels
inst_dict['strategy']['scanDirection'] = 'Forward' # Spatial scan direction, Forward or Round Trip
inst_dict['strategy']['schedulability'] = 30 # 30 for small/medium program, 100 for large program
inst_dict['strategy']['windowSize'] = 20 # (optional) Observation start window size in minutes. Default is 20 minutes.
inst_dict['strategy']['useFirstOrbit'] = True # (optional) Default is False, option to use first orbit
inst_dict['strategy']['calculateRamp'] = True # Enables ramp effect simulation for flux plot
inst_dict['strategy']['targetFluence'] = 30000 # Maximum pixel fluence level (in electrons)
foo = jdi.run_pandexo(exo_dict, inst_dict, output_file='wasp43b.p')
foo['wfc3_TExoNS']['info']
inst_dict['configuration']['detector']['nsamp'] = None
inst_dict['configuration']['detector']['samp_seq'] = None
bar = jdi.run_pandexo(exo_dict, inst_dict, output_file='wasp43b.p')
bar['wfc3_TExoNS']['info']
inst_dict['strategy']['scanDirection'] = 'Round Trip'
hst = jdi.run_pandexo(exo_dict, inst_dict, output_file='wasp43b.p')
hst['wfc3_TExoNS']['info']
import pandexo.engine.justplotit as jpi
#using foo from above
#other keys include model=True/False
datawave, dataspec, dataerror, modelwave, modelspec = jpi.hst_spec(foo)
#using foo from above
obsphase1, obstr1, obsphase2, obstr2,rms = jpi.hst_time(foo)
obsphase1, counts1, obsphase2, counts2, noise = jpi.hst_simulated_lightcurve(foo)
hst['wfc3_TExoNS']['info']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Edit Inputs
Step2: Edit stellar and planet inputs
Step3: Step 2) Load in instrument dictionary
Step4: Edit HST/WFC3 detector and observation inputs
Step5: Run PandExo
Step6: Plot Results
Step7: Compute earliest and latest possible start times for given start window size
Step8: Compute simulated lightcurves in fluence (unit
Step9: Print important info for observation
|
7,840
|
<ASSISTANT_TASK:>
Python Code:
def bcdToHexaDecimal(s):
    """Print the hexadecimal representation of the binary string ``s``.

    Bits are grouped into nibbles from the right; each nibble maps to one
    hex digit, so leading-zero nibbles are preserved (e.g. "00000001"
    prints "01").  Output is printed without a trailing newline.

    Changes from the original: removed the unused local ``num``, replaced
    the ``ord``-arithmetic digit mapping with a lookup table, and used a
    ``for`` loop instead of a manually-decremented ``while``.  Behavior
    for valid '0'/'1' input is unchanged.
    """
    hex_digits = []      # hex digits, least-significant first
    nibble = 0           # value of the 4-bit group being built
    weight = 1           # weight of the next bit within the nibble
    bits_in_group = 0
    for i in range(len(s) - 1, -1, -1):
        nibble += int(s[i]) * weight
        weight *= 2
        bits_in_group += 1
        # Emit a digit after every 4 bits, or when the string is exhausted.
        if bits_in_group == 4 or i == 0:
            hex_digits.append("0123456789ABCDEF"[nibble])
            nibble = 0
            weight = 1
            bits_in_group = 0
    # Digits were collected least-significant first; print most-significant first.
    for digit in reversed(hex_digits):
        print(digit, end="")
if __name__ == '__main__':
    # BUG FIX: the original compared __name__ against ' __main __' (with
    # embedded spaces), which can never match, so the driver never ran.
    s = "100000101111"  # 0b1000_0010_1111 -> prints "82F"
    bcdToHexaDecimal(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,841
|
<ASSISTANT_TASK:>
Python Code:
# CMIP6 ES-DOC notebook boilerplate: create the documentation output object
# for the MOHC 'sandbox-2' atmosphere realm.  The auto-generated
# "DO NOT EDIT" markers below must be preserved verbatim.
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# NOTE(review): arguments are (mip_era, institute, source_id, topic) —
# presumably fixed by the ES-DOC generator; verify before changing.
DOC = NotebookOutput('cmip6', 'mohc', 'sandbox-2', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
# Default 0 keeps the document unpublished until values are filled in.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
7,842
|
<ASSISTANT_TASK:>
Python Code:
from keras.layers import Bidirectional, Concatenate, Permute, Dot, Input, LSTM, Multiply
from keras.layers import RepeatVector, Dense, Activation, Lambda
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.models import load_model, Model
import keras.backend as K
import numpy as np
from faker import Faker
import random
from tqdm import tqdm
from babel.dates import format_date
from nmt_utils import *
import matplotlib.pyplot as plt
%matplotlib inline
m = 10000
dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(m)
dataset[:10]
Tx = 30
Ty = 10
X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty)
print("X.shape:", X.shape)
print("Y.shape:", Y.shape)
print("Xoh.shape:", Xoh.shape)
print("Yoh.shape:", Yoh.shape)
index = 0
print("Source date:", dataset[index][0])
print("Target date:", dataset[index][1])
print()
print("Source after preprocessing (indices):", X[index])
print("Target after preprocessing (indices):", Y[index])
print()
print("Source after preprocessing (one-hot):", Xoh[index])
print("Target after preprocessing (one-hot):", Yoh[index])
# Defined shared layers as global variables
# These layers are instantiated once and reused at every decoder time step,
# so the attention mechanism shares its weights across all output steps.
# Repeats the previous decoder state Tx times so it can be paired with each
# encoder hidden state.
repeator = RepeatVector(Tx)
# Joins the repeated decoder state with the encoder hidden states on the last axis.
concatenator = Concatenate(axis=-1)
# Two-layer network that scores each encoder time step (the "energies").
densor1 = Dense(10, activation = "tanh")
densor2 = Dense(1, activation = "relu")
activator = Activation(softmax, name='attention_weights') # We are using a custom softmax(axis = 1) loaded in this notebook
# Weighted sum over the time axis: combines alphas and encoder states into a context vector.
dotor = Dot(axes = 1)
# GRADED FUNCTION: one_step_attention
def one_step_attention(a, s_prev):
Performs one step of attention: Outputs a context vector computed as a dot product of the attention weights
"alphas" and the hidden states "a" of the Bi-LSTM.
Arguments:
a -- hidden state output of the Bi-LSTM, numpy-array of shape (m, Tx, 2*n_a)
s_prev -- previous hidden state of the (post-attention) LSTM, numpy-array of shape (m, n_s)
Returns:
context -- context vector, input of the next (post-attetion) LSTM cell
### START CODE HERE ###
# Use repeator to repeat s_prev to be of shape (m, Tx, n_s) so that you can concatenate it with all hidden states "a" (≈ 1 line)
s_prev = repeator(s_prev)
# Use concatenator to concatenate a and s_prev on the last axis (≈ 1 line)
concat = concatenator([a, s_prev])
# Use densor1 to propagate concat through a small fully-connected neural network to compute the "intermediate energies" variable e. (≈1 lines)
e = densor1(concat)
# Use densor2 to propagate e through a small fully-connected neural network to compute the "energies" variable energies. (≈1 lines)
energies = densor2(e)
# Use "activator" on "energies" to compute the attention weights "alphas" (≈ 1 line)
alphas = activator(energies)
# Use dotor together with "alphas" and "a" to compute the context vector to be given to the next (post-attention) LSTM-cell (≈ 1 line)
context = dotor([alphas, a])
### END CODE HERE ###
return context
# Hidden state sizes: n_a for each direction of the pre-attention Bi-LSTM,
# n_s for the post-attention (decoder) LSTM.
n_a = 32
n_s = 64
# Shared decoder cell; return_state=True exposes both the hidden and the cell
# state so they can be fed back as initial_state at the next time step.
post_activation_LSTM_cell = LSTM(n_s, return_state = True)
# Softmax over the machine vocabulary, producing one character distribution
# per output step (softmax here is the custom one loaded from nmt_utils).
output_layer = Dense(len(machine_vocab), activation=softmax)
# GRADED FUNCTION: model
def model(Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size):
Arguments:
Tx -- length of the input sequence
Ty -- length of the output sequence
n_a -- hidden state size of the Bi-LSTM
n_s -- hidden state size of the post-attention LSTM
human_vocab_size -- size of the python dictionary "human_vocab"
machine_vocab_size -- size of the python dictionary "machine_vocab"
Returns:
model -- Keras model instance
# Define the inputs of your model with a shape (Tx,)
# Define s0 and c0, initial hidden state for the decoder LSTM of shape (n_s,)
X = Input(shape=(Tx, human_vocab_size))
s0 = Input(shape=(n_s,), name='s0')
c0 = Input(shape=(n_s,), name='c0')
s = s0
c = c0
# Initialize empty list of outputs
outputs = []
### START CODE HERE ###
# Step 1: Define your pre-attention Bi-LSTM. Remember to use return_sequences=True. (≈ 1 line)
a = Bidirectional(LSTM(n_a, return_sequences = True))(X)
# Step 2: Iterate for Ty steps
for t in range(Ty):
# Step 2.A: Perform one step of the attention mechanism to get back the context vector at step t (≈ 1 line)
context = one_step_attention(a, s)
# Step 2.B: Apply the post-attention LSTM cell to the "context" vector.
# Don't forget to pass: initial_state = [hidden state, cell state] (≈ 1 line)
s, _, c = post_activation_LSTM_cell(context, initial_state = [s, c])
# Step 2.C: Apply Dense layer to the hidden state output of the post-attention LSTM (≈ 1 line)
out = output_layer(s)
# Step 2.D: Append "out" to the "outputs" list (≈ 1 line)
outputs.append(out)
# Step 3: Create model instance taking three inputs and returning the list of outputs. (≈ 1 line)
model = Model(inputs = [X, s0, c0], outputs = outputs)
### END CODE HERE ###
return model
model = model(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab))
model.summary()
### START CODE HERE ### (≈2 lines)
opt = Adam(lr = 0.005, beta_1 = 0.9, beta_2 = 0.999, decay = 0.01)
model.compile(opt, loss = 'categorical_crossentropy', metrics = ['accuracy'])
### END CODE HERE ###
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
outputs = list(Yoh.swapaxes(0,1))
model.fit([Xoh, s0, c0], outputs, epochs=1, batch_size=100)
model.load_weights('models/model.h5')
EXAMPLES = ['3 May 1979', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007', 'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001', '1 March 2001']
for example in EXAMPLES:
source = string_to_int(example, Tx, human_vocab)
source = np.array(list(map(lambda x: to_categorical(x, num_classes=len(human_vocab)), source))).swapaxes(0,1)
prediction = model.predict([source, s0, c0])
prediction = np.argmax(prediction, axis = -1)
output = [inv_machine_vocab[int(i)] for i in prediction]
print("source:", example)
print("output:", ''.join(output))
model.summary()
attention_map = plot_attention_map(model, human_vocab, inv_machine_vocab, "Tuesday 09 Oct 1993", num = 7, n_s = 64)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 - Translating human readable dates into machine readable dates
Step2: You've loaded
Step3: You now have
Step4: 2 - Neural machine translation with attention
Step6: Now you can use these layers to implement one_step_attention(). In order to propagate a Keras tensor object X through one of these layers, use layer(X) (or layer([X,Y]) if it requires multiple inputs.), e.g. densor(X) will propagate X through the Dense(1) layer defined above.
Step7: You will be able to check the expected output of one_step_attention() after you've coded the model() function.
Step9: Now you can use these layers $T_y$ times in a for loop to generate the outputs, and their parameters will not be reinitialized. You will have to carry out the following steps
Step10: Run the following cell to create your model.
Step11: Let's get a summary of the model to check if it matches the expected output.
Step12: Expected Output
Step13: The last step is to define all your inputs and outputs to fit the model
Step14: Let's now fit the model and run it for one epoch.
Step15: While training you can see the loss as well as the accuracy on each of the 10 positions of the output. The table below gives you an example of what the accuracies could be if the batch had 2 examples
Step16: You can now see the results on new examples.
Step17: You can also change these examples to test with your own examples. The next part will give you a better sense on what the attention mechanism is doing--i.e., what part of the input the network is paying attention to when generating a particular output character.
Step18: Navigate through the output of model.summary() above. You can see that the layer named attention_weights outputs the alphas of shape (m, 30, 1) before dot_2 computes the context vector for every time step $t = 0, \ldots, T_y-1$. Let's get the activations from this layer.
|
7,843
|
<ASSISTANT_TASK:>
Python Code:
# standard import if you're using "formula notation"
import statsmodels.formula.api as smf
lm = smf.ols(formula='Sales ~ TV', data=data).fit()
lm.params
# lets make a prediction if TV advertising would spend $50,000
# Statsmodels formula interface expects a datarames
X_new = pd.DataFrame({'TV':[50]})
X_new
lm.predict(X_new)
# create a dataframe with the minimum and maximum values of TV
X_new = pd.DataFrame({'TV':[data.TV.min(), data.TV.max()]})
X_new
preds = lm.predict(X_new)
preds
# first plot the observed data, then plot the least squares line
data.plot(kind='scatter', x='TV', y='Sales')
plt.plot(X_new, preds, c='red', linewidth=2)
# confidence intervals
lm.conf_int()
lm.pvalues
lm.rsquared
# create a fitted model with all three features
lm = smf.ols(formula='Sales ~ TV + Radio + Newspaper', data=data).fit()
lm.params
lm.summary()
# redo above examples with scikit-learn
feature_cols = ['TV', 'Radio', 'Newspaper']
X = data[feature_cols]
y = data.Sales
# usual scikit-learn pattern; import, instantiate, fit
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X, y)
lm.intercept_
lm.coef_
# pair the feature names with the coefficients (wrapped in list so the pairs
# actually display under Python 3, where zip is a lazy iterator)
list(zip(feature_cols, lm.coef_))
# predict for one new observation: scikit-learn expects a 2D array of shape
# (n_samples, n_features), so the single sample is wrapped in an outer list
lm.predict([[100, 25, 25]])
list(zip(feature_cols, lm.coef_))
# calculate the R-squared
lm.score(X, y)
# set a seed for reproducibility
np.random.seed(12345)
nums = np.random.rand(len(data))
mask_large = nums > 0.5 # random cathegorical data small/large
# initially set Size to small, then change roughly half to be large
data['Size'] = 'small'
data.loc[mask_large,'Size'] = 'large' # apply mask
data.head()
# for scikit-learn, we need to represent all data numerically;
data['IsLarge'] = data.Size.map({'small':0, 'large':1})
data.head()
# redo multiple linear regression and include IsLarge predictor
feature_cols = ['TV', 'Radio', 'Newspaper', 'IsLarge']
X = data[feature_cols]
y = data.Sales
# instantiate, fit
lm = LinearRegression()
lm.fit(X,y)
list(zip(feature_cols, lm.coef_))
# for reproducibilitty
np.random.seed(123456)
# assign roughly one third of observations in each category
nums = np.random.rand(len(data))
mask_suburban = (nums > 0.33) & (nums < 0.66)
mask_urban = (nums > 0.66)
data['Area'] = 'rural'
data.loc[mask_suburban, 'Area'] = 'suburban'
data.loc[mask_urban, 'Area'] = 'urban'
data.head()
# create three dummy variables using get_dummies, then exclude the first dummy column
area_dummies = pd.get_dummies(data.Area, prefix='Area').iloc[:,1:]
area_dummies.head()
data = pd.concat([data, area_dummies], axis=1)
data.head()
# extend the feature list with the area dummy variables and refit
feature_cols = feature_cols + ['Area_suburban', 'Area_urban']
feature_cols
X = data[feature_cols]
y = data.Sales
lm = LinearRegression()
lm.fit(X, y)
list(zip(feature_cols, lm.coef_))
# predict for one market: 2D input of shape (1, n_features), as scikit-learn requires
lm.predict([[100, 46, 45, 1, 1, 0]])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting the Least Squares Line
Step2: null hypothesis
Step3: The most common way to evaluate the overall fit of a linear model is by the R-squared value. R-squared is the proportion of variance explained, meaning the proportion of variance in the observed data that is explained by the model, or the reduction in error over the null model.
Step4: Multiple Linear Regression
Step5: Interpretation
Step6: Interpretation
Step7: What if one of our predictors was categorical, rather than numeric?
Step8: How do we interpret the IsLarge coefficient? For a given amount of TV/Radio/Newspaper ad spending, being a large market is associated with an average increase in sales of 57.42 widgets (compared to small market which is called the baseline level).
Step9: Again, we have to represent Area numerically, but we cannot simply encode it as 0=rural, 1=suburban, 2=urban because it would imply an ordered relationship between suburban and urban. Instead, another dummy
Step10: In general, if you have a categorical feature with k levels, you create k-1 dummy variables. Because the other dummies capture all the information about the feature. The "left out" will be the baseline.
Step11: How do we interpret, again? Holding all other variables fixed, being a suburban area is associated with an average decrease in sales of 106.56 widgets (compared to baseline which is rural). Being an urban area is associated with an average of 268 widgets sales increase.
|
7,844
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
dat_file = '../examples/1-rosenbrock/dakota.dat'
data = numpy.loadtxt(dat_file, skiprows=1, unpack=True, usecols=[0,2,3,4])
data
plot(data[1,], data[2,], 'ro')
xlim((-2, 2))
ylim((-2, 2))
xlabel('$x_1$')
ylabel('$x_2$')
title('Planview of parameter study locations')
plot(data[-1,], 'bo')
xlabel('index')
ylabel('Rosenbrock fuction value')
title('Rosenbrock function values at study locations')
min(data[-1,:])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the Dakota tabular data file.
Step2: Plot the path taken in the vector parameter study.
Step3: Plot the values of the Rosenbrock function at the study locations.
Step4: What's the minimum value of the function over the study locations?
|
7,845
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import tempfile
import textwrap
import dm_construction
from IPython.display import HTML
from matplotlib import animation
import matplotlib.pyplot as plt
import numpy as np
## Helper Functions
def show_rgb_observation(rgb_observation, size=5):
    """Plots a RGB observation, as returned from a Unity environment.

    Args:
      rgb_observation: numpy array of pixels
      size: size to set the figure
    """
    # The docstring above had lost its triple quotes in this copy, which made
    # the text a syntax error; restored as a proper docstring.
    _, ax = plt.subplots(figsize=(size, size))
    ax.imshow(rgb_observation)
    ax.set_axis_off()
    ax.set_aspect("equal")
def print_status(env_, time_step_):
    """Prints reward and episode termination information."""
    status = "r={}, p={}".format(time_step_.reward, time_step_.discount)
    # A discount of 0 marks the end of the episode; report why it ended.
    if time_step_.discount == 0:
        status += " (reason: {})".format(env_.termination_reason)
    print(status)
dm_construction.ALL_TASKS
dm_construction.ALL_WRAPPERS
# Create the environment.
env = dm_construction.get_environment(
"covering", wrapper_type="discrete_relative", difficulty=0)
env.action_spec()
env.observation_spec()
np.random.seed(1234)
time_step = env.reset()
# Get the image observation from the task environment.
show_rgb_observation(env.core_env.last_time_step.observation["RGB"])
# Pick an edge.
obs = time_step.observation
moved_block = 0
base_block = 7
edge_index = list(
zip(obs["senders"], obs["receivers"])).index((moved_block, base_block))
# Construct the action.
action = {
"Index": edge_index,
"sticky": 1, # make it sticky
"x_action": 0, # place it to the left
}
time_step = env.step(action)
print_status(env, time_step)
# Get the image observation from the task environment.
show_rgb_observation(env.core_env.last_time_step.observation["RGB"])
# Pick an edge.
obs = time_step.observation
moved_block = 3
base_block = len(obs["nodes"]) - 1
edge_index = list(
zip(obs["senders"], obs["receivers"])).index((moved_block, base_block))
# Construct the action.
action = {
"Index": edge_index,
"sticky": 0, # make it not sticky
"x_action": 12, # place it to the right
}
time_step = env.step(action)
print_status(env, time_step)
# Get the image observation from the task environment.
show_rgb_observation(env.core_env.last_time_step.observation["RGB"])
# Stop the environment.
env.close()
# Create the environment.
env = dm_construction.get_environment(
"covering", wrapper_type="continuous_absolute", difficulty=0)
env.action_spec()
env.observation_spec()
# Start a new episode.
np.random.seed(1234)
time_step = env.reset()
# This is the same observation that agents will see.
show_rgb_observation(time_step.observation)
# Place a block a bit to the right.
action = {
"Horizontal": 1,
"Vertical": 1,
"Sticky": -1,
"Selector": 0
}
time_step = env.step(action)
show_rgb_observation(time_step.observation)
print_status(env, time_step)
# Place another block in the center.
action = {
"Horizontal": 0,
"Vertical": 2,
"Sticky": 1,
"Selector": 0
}
time_step = env.step(action)
show_rgb_observation(time_step.observation)
print_status(env, time_step)
# Stop the environment.
env.close()
def get_environment(problem_type, wrapper_type="discrete_relative",
                    difficulty=0, curriculum_sample=False):
    """Gets the environment.

    This function separately creates the unity environment and then passes it
    to the environment factory. We do this so that we can add an observer to
    the unity environment to get all frames from which we will create a video.

    Args:
      problem_type: the name of the task
      wrapper_type: the name of the wrapper
      difficulty: the difficulty level
      curriculum_sample: whether to sample difficulty from [0, difficulty]

    Returns:
      env_: the environment
    """
    # Separately construct the Unity env, so we can enable the observer camera
    # and set a higher resolution on it.
    unity_env = dm_construction.get_unity_environment(
        observer_width=600,
        observer_height=600,
        include_observer_camera=True,
        max_simulation_substeps=50)

    # Create the main environment by passing in the already-created Unity env.
    env_ = dm_construction.get_environment(
        problem_type, unity_env, wrapper_type=wrapper_type,
        curriculum_sample=curriculum_sample, difficulty=difficulty)

    # Create an observer to grab the frames from the observer camera.
    env_.core_env.enable_frame_observer()
    return env_
def make_video(frames_):
    """Creates a video from a given set of frames.

    Args:
      frames_: sequence of image arrays, all the same shape.

    Returns:
      An IPython ``HTML`` object embedding the encoded mp4.
    """
    # Create the Matplotlib animation and save it to a temporary file.
    with tempfile.NamedTemporaryFile(suffix=".mp4") as fh:
        writer = animation.FFMpegWriter(fps=20)
        fig = plt.figure(frameon=False, figsize=(10, 10))
        ax = fig.add_axes([0, 0, 1, 1])
        ax.axis("off")
        ax.set_aspect("equal")
        im = ax.imshow(np.zeros_like(frames_[0]), interpolation="none")
        with writer.saving(fig, fh.name, 50):
            for frame in frames_:
                im.set_data(frame)
                writer.grab_frame()
        plt.close(fig)
        # Read and encode the video to base64. Use a context manager so the
        # file handle is closed (the original leaked an open file object).
        with open(fh.name, "rb") as mp4_file:
            mp4 = mp4_file.read()
        data_url = "data:video/mp4;base64," + base64.b64encode(mp4).decode()
    # Display the video in the notebook. The triple quotes of this template
    # string were stripped in this copy (a syntax error); restored here.
    return HTML(textwrap.dedent(
        """
        <video controls>
          <source src="{}" type="video/mp4">
        </video>
        """.format(data_url).strip()))
# Create the environment.
env = get_environment("covering", wrapper_type="continuous_absolute")
# Reset the episode.
np.random.seed(1234)
time_step = env.reset()
frames = env.core_env.pop_observer_frames()
# Take an action.
action = {
"Horizontal": 0,
"Vertical": 5,
"Sticky": 0,
"Selector": 0
}
time_step = env.step(action)
print_status(env, time_step)
# Get all the intermediate frames.
frames.extend(env.core_env.pop_observer_frames())
# Stop the environment.
env.close()
# Display the results as a video. Here you can see the block falling from a
# large height and eventually colliding with an obstacle.
make_video(frames)
# Create the environment.
env = get_environment("marble_run", wrapper_type="continuous_absolute")
# Reset the episode.
np.random.seed(1234)
time_step = env.reset()
frames = env.core_env.pop_observer_frames()
# Take an action.
action = {
"Horizontal": 0,
"Vertical": 5,
"Sticky": 1,
"Selector": 0
}
time_step = env.step(action)
print_status(env, time_step)
# Get all the intermediate frames.
frames.extend(env.core_env.pop_observer_frames())
# Stop the environment.
env.close()
# Display the results as a video
make_video(frames)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Installation
Step4: Supported tasks and wrappers
Step5: These are the wrappers that can be applied to the tasks
Step6: Discrete Relative Actions and Graph Observations
Step7: Continuous Absolute Actions and Image Observations
Step11: Creating Videos
|
7,846
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
df = pd.read_csv("../input/fe-course-data/concrete.csv")
df.head()
X = df.copy()
y = X.pop("CompressiveStrength")
# Train and score baseline model
baseline = RandomForestRegressor(criterion="mae", random_state=0)
baseline_score = cross_val_score(
baseline, X, y, cv=5, scoring="neg_mean_absolute_error"
)
baseline_score = -1 * baseline_score.mean()
print(f"MAE Baseline Score: {baseline_score:.4}")
X = df.copy()
y = X.pop("CompressiveStrength")
# Create synthetic features
X["FCRatio"] = X["FineAggregate"] / X["CoarseAggregate"]
X["AggCmtRatio"] = (X["CoarseAggregate"] + X["FineAggregate"]) / X["Cement"]
X["WtrCmtRatio"] = X["Water"] / X["Cement"]
# Train and score model on dataset with additional ratio features
model = RandomForestRegressor(criterion="mae", random_state=0)
score = cross_val_score(
model, X, y, cv=5, scoring="neg_mean_absolute_error"
)
score = -1 * score.mean()
print(f"MAE Score with Ratio Features: {score:.4}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can see here the various ingredients going into each variety of concrete. We'll see in a moment how adding some additional synthetic features derived from these can help a model to learn important relationships among them.
Step2: If you ever cook at home, you might know that the ratio of ingredients in a recipe is usually a better predictor of how the recipe turns out than their absolute amounts. We might reason then that ratios of the features above would be a good predictor of CompressiveStrength.
|
7,847
|
<ASSISTANT_TASK:>
Python Code:
# create some noise
a = np.random.randn(50, 600, 100)
a.shape
# create some noise with higher variance and add bias.
b = 2. * np.random.randn(*a.shape) + 1.
b.shape
# manufacture some loss function
# there are n_epochs * n_batchs * batch_size
# recorded values of the loss
loss = 10 / np.linspace(1, 100, a.size)
loss.shape
# we will create the indices from the
# product of these iterators
list(map(range, a.shape))
# create the MultiIndex
index = pd.MultiIndex.from_product(
list(map(range, a.shape)),
names=['epoch', 'batch', 'datapoint']
)
# create the dataframe that records the two losses
df = pd.DataFrame(
dict(loss1=loss+np.ravel(a),
loss2=loss+np.ravel(b)),
index=index
)
df
# some basic plotting
fig, ax = plt.subplots()
df.groupby(['epoch', 'batch']).mean().plot(ax=ax)
plt.show()
%%time
df.to_csv('losses.csv')
!ls -lh losses.csv
%%time
df_from_csv = pd.read_csv('losses.csv', index_col=['epoch', 'batch', 'datapoint'], float_precision='high')
# does not recover exactly due to insufficient floating point precision
df_from_csv.equals(df)
# but it has recovered it up to some tiny epsilon
((df-df_from_csv)**2 < 1e-25).all()
%%time
df.to_hdf('store.h5', key='losses')
!ls -lh store.h5
%%time
df_from_hdf = pd.read_hdf('store.h5', key='losses')
df.equals(df_from_hdf)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MultiIndex Dataframe
Step2: Visualization
Step3: CSV Read/Write
Step4: HDF5 Read/Write
Step5: Furthermore, the file sizes are significantly smaller.
Step6: Lastly, it is far more numerical precise.
|
7,848
|
<ASSISTANT_TASK:>
Python Code:
## Q2 Solution.
def hash(x):
    """Toy hash function h(x) = (3*x + 2) mod 11, returned as a float."""
    # NOTE(review): this shadows the builtin ``hash``; kept because the
    # loop below calls it by this name.
    bucket_count = 11
    scaled = 3 * x + 2
    return math.fmod(scaled, bucket_count)
for i in xrange(1,12):
print hash(i)
## Q3 Solution.
prob = 1.0 / 10
a = (1 - prob)**4
print a
b = (1 - ( 1 - (1 - prob)**2) )**2
print b
c = (1 - (1.0 /10 * 1.0 / 9))
print c
## Q5 Solution.
vec1 = np.array([2, 1, 1])
vec2 = np.array([10, -7, 1])
print vec1.dot(vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
## Q6 Solution.
# probability that they agree at one particular band
p1 = 0.6**2
print (1 - p1)**3
## Q7 Solution.
p1 = 1 - (1 - .9)**3
p2 = 1 - (1 - .1)**3
print "new LSH is (.4, .6, {}, {})-sensitive family".format(p1, p2)
## Q9 Solution.
M = np.array([[0, 0, 0, .25],
[1, 0, 0, .25],
[0, 1, 0, .25],
[0, 0, 1, .25]])
r = np.array([.25, .25, .25, .25])
for i in xrange(30):
r = M.dot(r)
print r
## Q10 Solution.
print 1 - (1 - .3)*(1 - .4)
##Q12
L = np.array([[-.25, -.5, -.76, -.29, -.03, -.07, -.01],
[-.05, -.1, -.15, .20, .26, .51, .77 ]]).T
print L
V = np.array([[6.74, 0],[0, 5.44]])
print V
R = np.array([[-.57, -.11, -.57, -.11, -.57],
[-.09, 0.70, -.09, .7, -.09]])
print R
print L.dot(V).dot(R)
X = 0.8 * np.array([[1.0/3, 0, 0],
[1.0/3, 0, 0],
[1.0/3, 1, 0]])
X += 0.2 * np.array([[.5, .5, .5],
[.5, .5, .5],
[ 0, 0, 0]])
print X
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q3.
Step2: Q4.
Step3: Q6.
Step4: Q7.
Step5: Q8.
Step6: Q10.
Step7: Q11.
Step8: Q13.
|
7,849
|
<ASSISTANT_TASK:>
Python Code:
c1 = lambda x: x + 1
c2 = lambda x: -x + 2
x1 = np.linspace(0.01, 2, 10)
x2 = np.linspace(-2, -0.01, 10)
plt.plot(x1, c1(x1), label=r"$y = x + 1$")
plt.plot(x2, c2(x2), label=r"$y = -x + 2$")
plt.plot(0, 2, 'wo', markersize=7)
plt.plot(0, 1, 'wo', markersize=7)
ax = plt.axes()
ax.set_ylim(0, 4)
plt.legend(loc=3)
g = lambda x: np.sin(x) / x
h = lambda x: (1 - np.cos(x)) / x
x = np.linspace(-3 * np.pi, 3 * np.pi, 100)
ax = plt.axes()
ax.set_xlim(-3 * np.pi, 3 * np.pi)
ax.set_ylim(-1, 1.25)
plt.plot(x, g(x), label=r"$y = g(x) = \frac{\sin x}{x}$")
plt.plot(x, h(x), label=R"$y = h(x) = \frac{1 - \cos x}{x}$")
plt.plot(0, 1, 'wo', markersize=7)
plt.plot(0, 0, 'wo', markersize=7)
plt.legend(loc=4)
f = lambda x: 1/x
x1 = np.linspace(-0.5, -0.01, 1000)
x2 = np.linspace(0.01, 0.5, 1000)
ax = plt.axes()
#ax.spines['left'].set_position(('data', 0))
#ax.spines['bottom'].set_position(('data', 0))
ax.set_xlim(-0.1, 0.1)
plt.plot(x1, f(x1), 'b')
plt.plot(x2, f(x2), 'b')
f0 = lambda x: 1/x
f1 = lambda x: -1/x**2
x1 = np.linspace(-0.5, -0.01, 1000)
x2 = np.linspace(0.01, 0.5, 1000)
p1 = plt.subplot(211)
p1.set_xlim(-0.1, 0.1)
plt.plot(x1, f0(x1), 'b', label=r"$y = 1/x$")
plt.plot(x2, f0(x2), 'b')
plt.legend(loc=4)
p2 = plt.subplot(212)
p2.set_xlim(-0.1, 0.1)
p2.set_ylim(-2000, 0)
plt.plot(x1, f1(x1), 'g', label=r"$y = -1/x^2$")
plt.plot(x2, f1(x2), 'g')
plt.legend(loc=4)
f = lambda x: np.sin(1/x)
x1 = np.linspace(-0.1, -0.01, 100)
x2 = np.linspace(0.01, 0.1, 100)
ax = plt.axes()
ax.set_xlim(-0.1, 0.1)
ax.set_ylim(-1.2, 1.2)
plt.plot(x1, f(x1))
plt.plot(x2, f(x2))
f = lambda x: x**2
fig, ax = plt.subplots()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_position(('data', 0))
ax.spines['left'].set_position(('data', 0))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticklabels(['$x_0$', '$x$'])
ax.yaxis.set_ticklabels(['$y_0$', '$y$'])
ax.xaxis.set_ticks([1, 1.5])
ax.yaxis.set_ticks([1, f(1.5)])
ax.set_xlim(-1, 2)
ax.set_ylim(-1, 3)
x = np.linspace(-1, 2, 100)
plt.plot(x, f(x))
plt.plot(1, f(1), 'ko')
plt.plot(1.5, f(1.5), 'ko')
plt.plot([1, 1.5], [f(1), f(1)], 'k--')
plt.plot([1.5, 1.5], [f(1), f(1.5)], 'k--')
plt.plot([1, 1.5], [f(1), f(1.5)], 'k--')
plt.annotate('$P$', (0.8, 1))
plt.annotate('$Q$', (1.3, f(1.5)))
plt.annotate('$\Delta{x}$', (1.25, 0.75))
plt.annotate('$\Delta{f}$', (1.55, 1.5))
f = lambda x: 1 / (1 + x**2)
x = np.linspace(-2, 2, 100)
y = f(x)
ax = plt.axes()
ax.set_ylim(0, 1.25)
plt.plot(x, y, label=r"$y = \frac{1}{1 + x^2}$")
plt.legend()
f_acc = lambda x: (-2 * x) / ((1 + x**2)**2)
x = np.linspace(-2, 2, 100)
plt.plot(x, f_acc(x))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Even though the value of $y$ for when $x = 0$ is undefined we can say something about the limits of this function.
Step2: Note that dividing by zero is an undefined operation, both of these functions are undefined for when $x = 0$ so we'll have two little circles in the plot. However we can see that $\lim_{x^+\to 0}g(x) = 1$ and that $\lim_{x^-\to 0}g(x) = 1$ so generally we can say that $\lim_{x\to 0}g(x) = 1$. We can also see that $\lim_{x^+\to 0}h(x) = 0$ and $\lim_{x^-\to 0}h(x) = 0$ so $\lim_{x\to 0}h(x) = 0$.
Step3: Now we see that $\lim_{x^+\to 0}\frac{1}{x} = \infty$ and $\lim_{x^-\to 0}\frac{1}{x} = -\infty$ and even though some people might say that these limits are undefined they are going in a definite direction so if able we should specify what they are.
Step4: If we take the derivative of an odd function we get an even function.
Step5: As we approach $x = 0$ it will oscilate into infinity. There is no left or right limit in this case.
Step6: We can now define, $f'(x_0)$ (the derivative) of $f$ at $x_0$ is the slope of the tangent line to $y = f(x)$ at the point $P$. The tangent line is equal to the limit of secant lines $PQ$ as $Q\to P$ where $P$ is fixed. In the picture above we can see that the slope of our our secant line $PQ$ is simply defined as $\frac{\Delta{f}}{\Delta{x}}$. However we can now define the slope $m$ of our tangent line as
Step7: Now let's compute $f'(x)$.
|
7,850
|
<ASSISTANT_TASK:>
Python Code:
__author__ = 'Shahariar Rabby'
import email
import imaplib
import ctypes
import getpass
import threading
from playsound import playsound
def user():
    """Prompt for Gmail credentials and return ``(email, password)``.

    The address is completed with the ``@gmail.com`` suffix; the password is
    read with ``getpass`` so it is not echoed to the terminal.
    NOTE: Python 2 code (``raw_input``).
    """
    # ORG_EMAIL = "@gmail.com"
    # FROM_EMAIL = "your mail" + ORG_EMAIL
    # FROM_PWD = "your pass"
    FROM_EMAIL = raw_input("insert Email : ")+'@gmail.com'
    FROM_PWD = getpass.getpass("input : ")
    return FROM_EMAIL,FROM_PWD
def login():
    """Log in to Gmail over IMAP/SSL and return the mailbox with INBOX selected."""
    FROM_EMAIL,FROM_PWD = user()
    # Gmail's IMAP endpoint; 993 is the standard IMAPS port.
    mail = imaplib.IMAP4_SSL('imap.gmail.com', 993)
    mail.login(FROM_EMAIL, FROM_PWD)
    mail.select("INBOX")
    print 'Login successful'
    return mail
mail = login()
def read_email_from_gmail(mail):
    """Print sender and subject of the 10 most recent messages in the mailbox."""
    try:
        type, data = mail.search(None, 'ALL') #search for all messages in the inbox
        mail_ids = data[0] #space-separated string of all message ids
        id_list = mail_ids.split() #split the ids into a list
        first_email_id = int(id_list[0]) #oldest message id
        latest_email_id = int(id_list[-1]) #newest message id
        for i in range(latest_email_id, latest_email_id - 10, -1): #read the last 10 messages, newest first
            typ, data = mail.fetch(i, '(RFC822)') #fetch full message i in RFC822 format
            for response_part in data: #iterate over the parts of the fetch response
                if isinstance(response_part, tuple):
                    msg = email.message_from_string(response_part[1]) #parse the raw message
                    email_subject = msg['subject'] #email subject
                    email_from = msg['from'] #sender address
                    print 'From : ' + email_from
                    print 'Subject : ' + email_subject
                    print "Read mail: https://gmail.com\n"
    except Exception, e:
        print (str(e)) #print any error instead of raising (NOTE(review): broad catch hides failures)
def Check_Unseen():
    """Check INBOX for unseen messages; print each and play a notification sound.

    Uses the module-level ``mail`` IMAP connection created by ``login()``.
    """
    mail.select("INBOX") #re-select the inbox so the UNSEEN flags are fresh
    n = 0
    (retcode, messages) = mail.search(None, '(UNSEEN)') #search for unseen mail
    if retcode == 'OK': #search succeeded
        for num in messages[0].split():
            n = n + 1
            print n #print the running message count
            typ, data = mail.fetch(num, '(RFC822)') #fetch the full message
            for response_part in data:
                if isinstance(response_part, tuple):
                    original = email.message_from_string(response_part[1])
                    print original['From']
                    data = original['Subject']
                    playsound('demonstrative.wav') #play a sound when mail arrives
                    print data
                    print "Read mail: https://gmail.com"
    from sys import stdout
    stdout.write(str('#')) #print '#' as a heartbeat showing the watcher is alive
def server():
    """Poll for unseen mail now, then reschedule this same check every 15 s."""
    Check_Unseen()
    threading.Timer(15, server).start() #re-invoke this function after 15 seconds
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: User Details Function
Step2: Login function
Step3: Mail Server
Step4: This function call Check_Unseen in every 15 sec.
|
7,851
|
<ASSISTANT_TASK:>
Python Code:
# Model Configuration
UNITS = 2 ** 11 # 2048
ACTIVATION = 'relu'
DROPOUT = 0.1
# Training Configuration
BATCH_SIZE_PER_REPLICA = 2 ** 11 # powers of 128 are best
# TensorFlow
import tensorflow as tf
print("Tensorflow version " + tf.__version__)
# TF 2.3 version
# Detect and init the TPU
# try: # detect TPUs
# tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() # TPU detection
# strategy = tf.distribute.TPUStrategy(tpu)
# except ValueError: # detect GPUs
# strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU
# print("Number of accelerators: ", strategy.num_replicas_in_sync)
# TF 2.2 version
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU
print("Number of accelerators: ", strategy.num_replicas_in_sync)
# Plotting
import pandas as pd
import matplotlib.pyplot as plt
# Matplotlib defaults
plt.style.use('seaborn-whitegrid')
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
titleweight='bold', titlesize=18, titlepad=10)
# Data
from kaggle_datasets import KaggleDatasets
from tensorflow.io import FixedLenFeature
AUTO = tf.data.experimental.AUTOTUNE
# Model
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import callbacks
def make_decoder(feature_description):
    """Build a parser for serialized TFRecord examples.

    The returned callable maps one serialized example to a
    ``(features, label)`` pair, reshaping the features to a flat
    vector of 28 float32 values.
    """
    def decoder(serialized_example):
        parsed = tf.io.parse_single_example(serialized_example, feature_description)
        feature_vec = tf.io.parse_tensor(parsed['features'], tf.float32)
        feature_vec = tf.reshape(feature_vec, [28])
        return feature_vec, parsed['label']
    return decoder
def load_dataset(filenames, decoder, ordered=False):
    """Build a tf.data pipeline over TFRecord files, decoded with ``decoder``.

    Args:
        filenames: list of TFRecord file paths.
        decoder: per-example parsing function (see ``make_decoder``).
        ordered: when False, allow non-deterministic interleaving for speed.

    Returns:
        A ``tf.data.Dataset`` of decoded examples.
    """
    AUTO = tf.data.experimental.AUTOTUNE
    ignore_order = tf.data.Options()
    if not ordered:
        # Trade exact file order for faster parallel reads.
        ignore_order.experimental_deterministic = False
    dataset = (
        tf.data
        .TFRecordDataset(filenames, num_parallel_reads=AUTO)
        .with_options(ignore_order)
        .map(decoder, AUTO)
    )
    return dataset
dataset_size = int(11e6)
validation_size = int(5e5)
training_size = dataset_size - validation_size
# For model.fit
batch_size = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
steps_per_epoch = training_size // batch_size
validation_steps = validation_size // batch_size
# For model.compile
steps_per_execution = steps_per_epoch
feature_description = {
'features': FixedLenFeature([], tf.string),
'label': FixedLenFeature([], tf.float32),
}
decoder = make_decoder(feature_description)
data_dir = KaggleDatasets().get_gcs_path('higgs-boson')
train_files = tf.io.gfile.glob(data_dir + '/training' + '/*.tfrecord')
valid_files = tf.io.gfile.glob(data_dir + '/validation' + '/*.tfrecord')
ds_train = load_dataset(train_files, decoder, ordered=False)
ds_train = (
ds_train
.cache()
.repeat()
.shuffle(2 ** 19)
.batch(batch_size)
.prefetch(AUTO)
)
ds_valid = load_dataset(valid_files, decoder, ordered=False)
ds_valid = (
ds_valid
.batch(batch_size)
.cache()
.prefetch(AUTO)
)
def dense_block(units, activation, dropout_rate, l1=None, l2=None):
    """Return a callable appending Dense -> BatchNorm -> Activation -> Dropout.

    Args:
        units: width of the dense layer.
        activation: activation applied after batch normalization.
        dropout_rate: dropout probability.
        l1: unused here -- presumably reserved for L1 regularization (TODO confirm).
        l2: unused here -- presumably reserved for L2 regularization (TODO confirm).
    """
    def make(inputs):
        x = layers.Dense(units)(inputs)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation)(x)
        x = layers.Dropout(dropout_rate)(x)
        return x
    return make
with strategy.scope():
# Wide Network
wide = keras.experimental.LinearModel()
# Deep Network
inputs = keras.Input(shape=[28])
x = dense_block(UNITS, ACTIVATION, DROPOUT)(inputs)
x = dense_block(UNITS, ACTIVATION, DROPOUT)(x)
x = dense_block(UNITS, ACTIVATION, DROPOUT)(x)
x = dense_block(UNITS, ACTIVATION, DROPOUT)(x)
x = dense_block(UNITS, ACTIVATION, DROPOUT)(x)
outputs = layers.Dense(1)(x)
deep = keras.Model(inputs=inputs, outputs=outputs)
# Wide and Deep Network
wide_and_deep = keras.experimental.WideDeepModel(
linear_model=wide,
dnn_model=deep,
activation='sigmoid',
)
wide_and_deep.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['AUC', 'binary_accuracy'],
# experimental_steps_per_execution=steps_per_execution,
)
early_stopping = callbacks.EarlyStopping(
patience=2,
min_delta=0.001,
restore_best_weights=True,
)
lr_schedule = callbacks.ReduceLROnPlateau(
patience=0,
factor=0.2,
min_lr=0.001,
)
history = wide_and_deep.fit(
ds_train,
validation_data=ds_valid,
epochs=50,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
callbacks=[early_stopping, lr_schedule],
)
history_frame = pd.DataFrame(history.history)
history_frame.loc[:, ['loss', 'val_loss']].plot(title='Cross-entropy Loss')
history_frame.loc[:, ['auc', 'val_auc']].plot(title='AUC');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The next few sections set up the TPU computation, data pipeline, and neural network model. If you'd just like to see the results, feel free to skip to the end!
Step2: Notice that TensorFlow now detects eight accelerators. Using a TPU is a bit like using eight GPUs at once.
Step3: Model
Step4: Training
|
7,852
|
<ASSISTANT_TASK:>
Python Code:
cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7, Tcmb0=2.725)
# check to make sure we have defined the bpz filter path
if not os.getenv('EZGAL_FILTERS'):
os.environ['EZGAL_FILTERS'] = (f'{os.environ["HOME"]}/Projects/planckClusters/MOSAICpipe/bpz-1.99.3/FILTER/')
model = ezgal.model('bc03_ssp_z_0.02_salp.model')
model = model.make_exponential(1)
model.set_cosmology(Om=cosmo.Om0, Ol=cosmo.Ode0, h=cosmo.h, w=cosmo.w(0))
model.add_filter('g_MOSAICII.res', name='g')
model.add_filter('r_MOSAICII.res', name='r')
model.add_filter('i_MOSAICII.res', name='i')
model.add_filter('z_MOSAICII.res', name='z')
model.add_filter('K_KittPeak.res', name='K')
# Blanton 2003 Normalization
Mr_star = -20.44 + 5 * np.log10(cosmo.h) # abs mag.
# set the normalization
model.set_normalization('sloan_r', 0.1, Mr_star, vega=False)
# desired formation redshift
zf = 6.0
# fetch an array of redshifts out to given formation redshift
zs = model.get_zs(zf)
# Calculate some cosmological stuff
DM = cosmo.distmod(zs)
dlum = cosmo.luminosity_distance(zs)
def rho_crit(z, cosmo):
    """Return the critical density of the universe at redshift z.

    Uses rho_c(z) = 3 H(z)^2 / (8 pi G) with H(z) = H0 * E(z).

    Args:
        z: redshift (scalar or array).
        cosmo: astropy cosmology object providing ``H0`` and ``efunc``.

    Returns:
        Critical density as an astropy Quantity in M_sun / Mpc^3.
    """
    # convert G into better units:
    G = const.G.to(u.km**2 * u.Mpc/(u.M_sun * u.s**2))
    return 3 / (8 * np.pi * G) * cosmo.H0**2 * cosmo.efunc(z)**2 # Mpc^3
def schechterL(luminosity, phiStar, alpha, LStar):
    """Schechter luminosity function.

    phi(L) = (phiStar / LStar) * (L / LStar)**alpha * exp(-L / LStar)

    Args:
        luminosity: luminosity L (scalar or numpy array).
        phiStar: normalization phi*.
        alpha: faint-end slope.
        LStar: characteristic luminosity L*.

    Returns:
        Number density per unit luminosity at ``luminosity``.
    """
    # The docstring above had lost its quotes in this copy (syntax error);
    # restored as a proper docstring.
    LOverLStar = (luminosity/LStar)
    return (phiStar/LStar) * LOverLStar**alpha * np.exp(- LOverLStar)
def schechterM(magnitude, phiStar, alpha, MStar):
    """Schechter luminosity function by magnitudes.

    phi(M) = 0.4 ln(10) phiStar 10**(0.4 (MStar - M)(alpha + 1))
             * exp(-10**(0.4 (MStar - M)))

    The restored docstring above had lost its quotes in this copy
    (syntax error). Scalar parameters are broadcast against an array
    ``magnitude``.
    """
    # check to make sure things are all the same size
    if isinstance(phiStar, float) and isinstance(magnitude, np.ndarray):
        phiStar = np.ones_like(magnitude) * phiStar
    if isinstance(alpha , float) and isinstance(magnitude, np.ndarray):
        alpha = np.ones_like(magnitude) * alpha
    if isinstance(MStar, float) and isinstance(magnitude, np.ndarray):
        MStar = np.ones_like(magnitude) * MStar
    MStarMinM = 0.4 * (MStar - magnitude)
    return (0.4 * np.log(10) * phiStar * 10.0**(MStarMinM * (alpha + 1.)) * np.exp(-10.**MStarMinM))
# So now we are going to calculate the volumes as a function of z
#M200 = mass_func(zarr) * u.solMass
M200 = 1e15 * u.solMass
V200 = M200/ (200 * rho_crit(zs, cosmo))
# Calculate the M_star values
Mstar = model.get_absolute_mags(zf, filters='i', zs=zs)
# calculate the abs mag of our limiting magnitude as a function of z
mlim = 23.5
#Mlim = Mstar - 2.5 * np.log10(0.4)
Mlim = mlim - cosmo.distmod(zs).value - model.get_kcorrects(zf, filters='i', zs=zs)
# Here are the Schechter function stuff from Liu et al.
phi_star = 3.6 * cosmo.efunc(zs)**2
alpha = -1.05 * (1 + zs)**(-2/3)
fr = 0.8*(1 + zs)**(-1/2)
LF = []
for phi, a, M_star, M_lim in zip(phi_star, alpha, Mstar, Mlim):
if M_lim < M_star - 2.5 * np.log10(0.4):
Mlimit = M_lim
else:
Mlimit = M_star - 2.5 * np.log10(0.4)
y, err = quad(schechterM, -30, Mlimit, args=(phi, a, M_star))
LF.append(y)
plt.plot(zs, (LF * V200.value + 1) * fr)
ax = plt.gca()
#ax.set_yticks(np.arange(0, 75, 10))
plt.xlim(0.2, 4)
plt.ylim(0, 100)
plt.xlabel('redshift')
plt.ylabel('N (r < r200)')
plt.grid()
import pandas as pd
df = pd.read_csv('../hennig/tbl.tex', sep='&', header=None, engine='python', comment='%', dtype='object')
for i in range(len(df.columns)):
df[i] = df[i].map(lambda x: x.replace('$', ''))
df[i] = df[i].map(lambda x: x.strip())
df.columns = ['Cluster', 'M200', 'z', 'cg', 'N200', 'cg,rs', 'N200,rs', 'cg,nrs', 'N200,nrs']
for c in df.columns:
df.loc[df.loc[:, c].str.strip() == "-", c] = np.nan
for i in range(1, 2*len(df.columns), 2 ):
try:
df.insert(i+1, f'{df.columns[i]}_err', np.nan)
except IndexError:
pass
df = df.astype('object')
for i, row in df.iterrows():
for j in range(1, len(df.columns), 2):
col = df.columns[j]
col_err = df.columns[j + 1]
try:
# the error array is [plus, minus]
df.at[i, col_err] = [float(row[col].split('^')[1][2:7].rstrip('}')),
abs(float(row[col].split('^')[1][10:].rstrip('}')))]
df.at[i, col] = float(row[col].split('^')[0])
except IndexError:
df.at[i, col_err] = float(row[col].split('\pm')[-1])
df.at[i, col] = float(row[col].split('\pm')[0])
except AttributeError:
continue
# make numeric columns numeric
cols = df.columns
df[cols] = df[cols].apply(pd.to_numeric, errors='ignore')
### recalculate things ###
##########################
# So now we are going to calculate the volumes as a function of z
M200 = 6e14 * u.solMass
V200 = M200/ (200 * rho_crit(zs, cosmo))
# The Hennig paper gives their depths in table 2.
# I am just going to do everything in the i band.
# calculate the abs mag of our limiting magnitude as a function of z
mstar = model.get_apparent_mags(zf, filters='i', zs=zs)
mlim = mstar - 2.5 * np.log10(0.4)
Mlim = mlim - cosmo.distmod(zs).value - model.get_kcorrects(zf, filters='i', zs=zs)
#Mlim = model.get_absolute_mags(zf, filters='i', zs=zs) + 2
# Calculate the M_star values
Mstar = model.get_absolute_mags(zf, filters='i', zs=zs)
# Here are the Schechter function stuff from Liu et al.
phi_star = 3.6 * cosmo.efunc(zs)**2
alpha = -1.05 * (1 + zs)**(-2/3)
fr = 0.8*(1 + zs)**(-1/2)
LF = []
for phi, a, M_star, M_lim in zip(phi_star, alpha, Mstar, Mlim):
y, err = quad(schechterM, -30, M_lim, args=(phi, a, M_star))
LF.append(y)
# plot the model
plt.plot(zs, (LF * V200.value + 1) * fr)
ax = plt.gca()
#ax.set_yticks(np.arange(0, 75, 10))
plt.xlim(0.2, 1.2)
plt.ylim(0, 100)
plt.xlabel('redshift')
plt.ylabel('N (r < r200)')
plt.grid()
# add the clusters
for i, row in df.iterrows():
if (row.M200 + row.M200_err[0] > 6) and (6 > row.M200 - row.M200_err[1]):
if np.isnan(row['N200,rs']):
continue
#ax.errorbar(row.z, row['N200,rs'], yerr=np.array(row['N200,rs_err'][::-1])[:, np.newaxis], fmt='o', zorder=0, c=row.M200)
#ax.scatter(row.z, row['N200,rs'], fmt='o', zorder=0, c=)
ax.scatter(df.z, df['N200,rs'], c=df.M200, cmap='viridis', vmin=5, vmax=7, s=40, zorder=1)
#df.plot.scatter('z', 'N200,rs', ax=ax, c='M200', colormap='viridis',vmin=5, vmax=7, s=40)
#plt.axhline(6)
#df.loc[(df.M200 + df.M200_err[0] > 6) & (6 > df.M200 - df.M200_err[1])]
df.M200_err.data
# calculate the abs mag of our limiting magnitude as a function of z
mlim_arr = [19.5, 21.2]
for mlim in mlim_arr:
Mlim = mlim - cosmo.distmod(zs).value - model.get_kcorrects(zf, filters='i', zs=zs)
# Calculate the M_star values
Mstar = model.get_absolute_mags(zf, filters='i', zs=zs)
# Here are the Schechter function stuff from Liu et al.
phi_star = 3.6 * cosmo.efunc(zs)**2
alpha = -1.05 * (1 + zs)**(-2/3)
fr = 0.8*(1 + zs)**(-1/2)
LF = []
for phi, a, M_star, M_lim in zip(phi_star, alpha, Mstar, Mlim):
y, err = quad(schechterM, -np.inf, M_lim, args=(phi, a, M_star))
LF.append(y)
plt.plot(zs, (LF * V200.value + 1) * fr)
ax = plt.gca()
ax.set_yticks(np.arange(0, 75, 10))
plt.xlim(0.2, 1)
plt.ylim(0, 75)
plt.xlabel('redshift')
plt.ylabel('N (r < r200)')
plt.grid()
df.loc[(df.M200 + df.M200_err[0] > 6) and (6 > df.M200 - df.M200_err[1])]
2.5 * np.log10(0.4)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Stellar Population
Step2: Calculate a few things to get going.
Step5: Define the functions that we'll need
Step6: Start Calculating things
Step7: Integrate
Step8: Plot
Step9: Add clusters from the Hennig paper.
Step10: Test Area Below...
|
7,853
|
<ASSISTANT_TASK:>
Python Code:
# Imports and utility functions
import time
import numpy as np
import matplotlib.pyplot as plt
from qutip.sesolve import sesolve
from qutip.solver import Options, solver_safe
from qutip import sigmax, sigmay, sigmaz, identity, tensor, basis, Bloch
def timing_val(func):
    """Decorator that times ``func``.

    The wrapped callable returns a 3-tuple
    ``(result, elapsed_seconds, function_name)`` instead of the bare result.
    source: http://www.daniweb.com/code/snippet368.html
    """
    from functools import wraps  # local import keeps the block self-contained

    @wraps(func)  # preserve the wrapped function's name/docstring metadata
    def wrapper(*arg, **kw):
        # perf_counter is monotonic, so the interval is robust against
        # system clock adjustments (unlike time.time()).
        t1 = time.perf_counter()
        res = func(*arg, **kw)
        t2 = time.perf_counter()
        return res, (t2 - t1), func.__name__
    return wrapper
def plot_exp(tlist, expects, lbls, title):
    """Plot each expectation-value series in ``expects`` against ``tlist``,
    labelling series i with ``lbls[i]``."""
    figure = plt.figure()
    axes = figure.add_subplot(111)
    for series_idx, series in enumerate(expects):
        axes.plot(tlist, series, label=lbls[series_idx])
    axes.set_xlabel(r"$t$")
    axes.set_title(title)
    axes.legend()
def g_sine(t, args):
    """Sinusoidal control amplitude: A_s * sin(w_s * t)."""
    amplitude = args['A_s']
    angular_freq = args['w_s']
    return amplitude * np.sin(angular_freq * t)
def g_decay(t, args):
    """Exponentially decaying control amplitude: A_d * exp(-t / t_d)."""
    amplitude = args['A_d']
    tau = args['t_d']
    return amplitude * np.exp(-t / tau)
g_sine_str = "A_s*sin(w_s*t)"
g_decay_str = "A_d*exp(-t/t_d)"
t_tot = 10.0
w_1 = 0.3
w_2 = 0.3
w_i = 0.02
A_s = 0.3
A_d = 0.3
w_s = np.pi/t_tot
t_d = 5.0
tlist = np.linspace(0.0, t_tot, 200)
args = {'A_s': A_s, 'A_d': A_s, 'w_s': w_s, 't_d': t_d}
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(tlist, g_sine(tlist, args), label=r"$g_{\rm{sin}}$") #$")
ax.plot(tlist, g_decay(tlist, args), label=r"$g_{\rm{decay}}$")
ax.set_xlabel(r"$t$")
ax.set_title("Control functions")
ax.legend()
Id2 = identity(2)
Sz1 = tensor(sigmaz(), Id2)
Sz2 = tensor(Id2, sigmaz())
Sx1 = tensor(sigmax(), Id2)
Sy1 = tensor(sigmay(), Id2)
Sx2 = tensor(Id2, sigmax())
Sy2 = tensor(Id2, sigmay())
H_d1 = w_1*Sz1
H_d2 = w_2*Sz2
H_c1 = Sx1
H_c2 = Sy2
H_i = w_i*tensor(sigmaz(), sigmaz())
H_func_type = [H_d1, H_d2, [H_c1, g_sine], [H_c2, g_decay], H_i]
H_str_type = [H_d1, H_d2, [H_c1, g_sine_str], [H_c2, g_decay_str], H_i]
up_state = basis(2, 0)
b = Bloch()
b.add_states(up_state)
b.show()
init_state = tensor(up_state, up_state)
meas = [Sz1, Sx1, Sz2, Sx2]
@timing_val
def repeat_solve(H, init_state, tlist, num_reps=1,
                 e_ops=None, args=None, options=None):
    """Solve the Schrodinger equation `num_reps` times for timing purposes.

    H: Hamiltonian in any sesolve-accepted format (func/string time dependence)
    init_state: initial state ket
    tlist: times at which to record the solution on the first run
    num_reps: total number of solves; repeats reuse the compiled RHS
    e_ops: expectation operators passed through to sesolve
    args: coefficient-function arguments passed through to sesolve
    options: qutip Options; a default instance is created if None

    Returns the sesolve result of the first (full) run.  The @timing_val
    decorator wraps this as (result, elapsed_seconds, function_name).
    """
    if options is None:
        options = Options()
    # First full solve - this produces the result we report.
    # BUG FIX: the original passed the module-level global `meas` here,
    # silently ignoring the `e_ops` parameter; use the parameter instead.
    out = sesolve(H, init_state, tlist,
                  e_ops=e_ops, args=args, options=options)
    if num_reps > 1:
        # Reuse the compiled right-hand side so repeats time only execution,
        # not code generation; solve over just the interval end points.
        options.rhs_reuse = True
        tl = np.array([0, tlist[-1]])
        for _ in range(num_reps - 1):
            sesolve(H, init_state, tl,
                    e_ops=e_ops, args=args, options=options)
    return out
n_reps = 1
out, t_func, fname = repeat_solve(H_func_type, init_state, tlist, num_reps=n_reps,
e_ops=meas, args=args)
print("{} execution of func type took {} seconds.".format(n_reps, t_func))
# Plot qubit 1 expectations
plot_exp(tlist, out.expect[:2], lbls=["E[Z]", "E[X]"],
title="Qubit 1 - func type")
n_reps = 1
out, t_func, fname = repeat_solve(H_str_type, init_state, tlist, num_reps=n_reps,
e_ops=meas, args=args)
print("{} execution of string type took {} seconds.".format(n_reps, t_func))
# Plot qubit 1 expectations
plot_exp(tlist, out.expect[:2], lbls=["E[Z]", "E[X]"],
title="Qubit 1 - string type")
n_rep_list = [1, 2, 5, 10, 20] #, 100] #, 1000, 20000, 100000]
# n_rep_list = [100, 1000, 20000, 100000, 500000]
t_per_exec_f = []
t_per_exec_s = []
H_zero = H_i*0.0
for i, n_reps in enumerate(n_rep_list):
out, t_func, fname = repeat_solve(H_func_type, init_state, tlist,
num_reps=n_reps,
e_ops=meas, args=args)
t_per_exec_f.append(t_func / n_reps)
#print("{} execution of func type took {} seconds.".format(n_reps, t_func))
# twisted method of making the code change to force new hash and
# hence recompile
key = 'nreps{}'.format(i)
args[key] = n_reps
H = list(H_str_type)
H.append([H_zero, key])
out, t_func, fname = repeat_solve(H, init_state, tlist,
num_reps=n_reps,
e_ops=meas, args=args)
#print("{} execution of string type took {} seconds.".format(n_reps, t_func))
t_per_exec_s.append(t_func / n_reps)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(n_rep_list, t_per_exec_f, 'o', label="func type")
ax.plot(n_rep_list, t_per_exec_s, 'x', label="string type")
ax.set_xlabel(r"$N_{\rm{reps}}$")
ax.set_ylabel("exec time per rep")
ax.set_title("Comparing Method Exec Time")
ax.legend()
n_rep_list = [1000, 5000, 10000, 15000, 20000, 50000, 100000]
t_per_exec_f = []
t_per_exec_s = []
H_zero = H_i*0.0
for i, n_reps in enumerate(n_rep_list):
out, t_func, fname = repeat_solve(H_func_type, init_state, tlist,
num_reps=n_reps,
e_ops=meas, args=args)
t_per_exec_f.append(t_func / n_reps)
#print("{} execution of func type took {} seconds.".format(n_reps, t_func))
# twisted method of making the code change to force new hash and
# hence recompile
key = 'nreps{}'.format(i)
args[key] = n_reps
H = list(H_str_type)
H.append([H_zero, key])
out, t_func, fname = repeat_solve(H, init_state, tlist,
num_reps=n_reps,
e_ops=meas, args=args)
#print("{} execution of string type took {} seconds.".format(n_reps, t_func))
t_per_exec_s.append(t_func / n_reps)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(n_rep_list, t_per_exec_f, 'o', label="func type")
ax.plot(n_rep_list, t_per_exec_s, 'x', label="string type")
ax.set_xlabel(r"$N_{\rm{reps}}$")
ax.set_ylabel("exec time per rep")
ax.set_title("Comparing Method Exec Time")
ax.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Time dependent control functions
Step2: Hamiltonians, initial state and measurements
Step3: Solving the dynamics
Step4: Function type
Step5: String type
Step6: Comparing execution times
|
7,854
|
<ASSISTANT_TASK:>
Python Code:
results_file = '../data/lda_data_8.pickle'
n_iters = 10
for n in range(n_iters):
print "iteration %d" % n
print results_file
X, Y, Yaudio = classification.load_data_from_pickle(results_file)
# get only 80% of the dataset.. to vary the choice of outliers
X, _, Y, _ = train_test_split(X, Y, train_size=0.8, stratify=Y)
print X.shape, Y.shape
# outliers
print "detecting outliers..."
df_global, threshold, MD = outliers.get_outliers_df(X, Y, chi2thr=0.999)
outliers.print_most_least_outliers_topN(df_global, N=10)
# write output
print "writing file"
df_global.to_csv('../data/outliers_'+str(n)+'.csv', index=False)
n_iters = 10
ranked_countries = pd.DataFrame()
ranked_outliers = pd.DataFrame()
for n in range(n_iters):
df_global = pd.read_csv('../data/outliers_'+str(n)+'.csv')
df_global = df_global.sort_values('Outliers', axis=0, ascending=False).reset_index()
ranked_countries = pd.concat([ranked_countries, df_global['Country']], axis=1)
ranked_outliers = pd.concat([ranked_outliers, df_global['Outliers']], axis=1)
ranked_countries_arr = ranked_countries.get_values()
# majority voting + precision at K
K_vote = 10
country_vote = Counter(ranked_countries_arr[:K_vote, :].ravel())
df_country_vote = pd.DataFrame.from_dict(country_vote, orient='index').reset_index()
df_country_vote.sort_values(0, ascending=False)
def precision_at_k(array, gr_truth, k):
    """Fraction of the top-k entries shared between *array* and *gr_truth*."""
    top_predicted = set(array[:k])
    top_true = set(gr_truth[:k])
    return len(top_predicted & top_true) / float(k)
k = 10
ground_truth = df_country_vote['index'].get_values()
p_ = []
for j in range(ranked_countries_arr.shape[1]):
p_.append(precision_at_k(ranked_countries_arr[:, j], ground_truth, k))
p_ = np.array(p_)
print 'mean', np.mean(p_)
print 'std', np.std(p_)
print p_
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Estimate precision at K
|
7,855
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
from IPython.html import widgets
def print_sum(a, b):
    """Print the sum of the arguments a and b."""
    # The docstring's triple quotes were stripped by corpus cleaning,
    # which made this line a SyntaxError; restored here.
    print(a + b)
interact(print_sum, a = (-10., 10.,.1), b = (-8,8,2));
assert True # leave this for grading the print_sum exercise
def print_string(s, length=False):
    """Print the string s and optionally its length."""
    # The docstring's triple quotes were stripped by corpus cleaning,
    # which made this line a SyntaxError; restored here.
    l = len(s)
    print(s)
    if length:
        print(l)
interact(print_string, s="Hello World!", length = True);
assert True # leave this for grading the print_string exercise
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Interact basics
Step3: Use the interact function to interact with the print_sum function.
Step5: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
Step6: Use the interact function to interact with the print_string function.
|
7,856
|
<ASSISTANT_TASK:>
Python Code:
from scipy import misc as scm
import os.path as op
import matplotlib.pyplot as plt
% matplotlib inline
datadir = '/tmp/113_1/'
im = scm.imread(op.join(datadir,'0090.png'))
plt.imshow(im, cmap='gray')
plt.show()
import os
import numpy as np
files = os.listdir(datadir) # get a list of all files in the dataset
print 'X image size: ', im.shape[1] # second dimension is X in our png
print 'Y image size: ', im.shape[0] # first dimension is Y in our png
print 'Z image size: ', len(files) # we get Z by counting the number of images in our directory
print 'Time range: (0, 0)' # default value if the data is not time series
dtype = im.dtype
print 'Data type: ', dtype
try:
im_min = np.iinfo(dtype).max
im_max = np.iinfo(dtype).min
except:
im_min = np.finfo(dtype).max
im_max = np.finfo(dtype).min
for f in files: # get range by checking each slice min and max
temp_im = scm.imread(op.join(datadir, f))
im_min = np.min(temp_im) if np.min(temp_im) < im_min else im_min # update image stack min
im_max = np.max(temp_im) if np.max(temp_im) > im_max else im_max # update image stack max
print 'Window range: (%f, %f)' % (im_min, im_max)
print "more code here, as always"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1
Step1: Step 2
Step2: It's also important to summarize what we've done, so that the user can Summarizing these results and those that require more intimate knowledge of the data, we come up with the following
|
7,857
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.cluster import KMeans
def kmeans_missing(X, n_clusters, max_iter=10):
    """Perform K-Means clustering on data with missing values.

    Args:
      X: An [n_samples, n_features] array of data to cluster.
      n_clusters: Number of clusters to form.
      max_iter: Maximum number of EM iterations to perform.

    Returns:
      labels: An [n_samples] vector of integer labels.
      centroids: An [n_clusters, n_features] array of cluster centroids.
      X_hat: Copy of X with the missing values filled in by cluster centroids.
      X_gm: Copy of X with the missing values filled in by global column means.
    """
    # Initialize missing values to their column means.
    # All non-finite entries (NaN, +inf, -inf) are treated as missing.
    missing = ~np.isfinite(X)
    mu = np.nanmean(X, axis=0, keepdims=1)  # per-column mean, ignoring NaN
    X_hat = np.where(missing, mu, X)  # take mu where missing, else keep X
    X_gm = X_hat.copy()
    for i in xrange(max_iter):
        if i > 0:
            # initialize KMeans with the previous set of centroids. this is much
            # faster and makes it easier to check convergence (since labels
            # won't be permuted on every iteration), but might be more prone to
            # getting stuck in local minima.
            cls = KMeans(n_clusters, init=prev_centroids)
        else:
            # do multiple random initializations in parallel
            cls = KMeans(n_clusters, n_jobs=-1)
        # perform clustering on the filled-in data
        labels = cls.fit_predict(X_hat)
        centroids = cls.cluster_centers_
        # fill in the missing values based on their cluster centroids
        X_hat[missing] = centroids[labels][missing]
        # when the labels have stopped changing then we have converged
        if i > 0 and np.all(labels == prev_labels):
            break
        prev_labels = labels
        prev_centroids = cls.cluster_centers_
    return labels, centroids, X_hat, X_gm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
matplotlib.style.use('ggplot')
from sklearn.datasets import make_blobs
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def make_fake_data(fraction_missing, n_clusters=5, n_samples=5000,
                   n_features=3, seed=0):
    """Generate blob data plus a copy with a fraction of entries set to NaN.

    Returns (X, true_labels, Xm) where Xm is X with roughly
    `fraction_missing` of its entries replaced by NaN.
    """
    rng = np.random.RandomState(seed)
    # Complete, fully observed data.
    X, true_labels = make_blobs(n_samples, n_features, n_clusters,
                                random_state=rng)
    # Knock out a random subset of entries to simulate missing values.
    mask = rng.rand(*X.shape) < fraction_missing
    Xm = np.where(mask, np.nan, X)
    return X, true_labels, Xm
X, true_labels, Xm = make_fake_data(fraction_missing=0.1, n_clusters=6, seed=10)
labels, centroids, X_hat, X_gm = kmeans_missing(Xm, n_clusters=6, max_iter=40)
fig = plt.figure(figsize=(18,12))
ax = fig.add_subplot(221, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=true_labels, cmap='gist_rainbow')
ax.set_title('Original data')
ax = fig.add_subplot(222, projection='3d')
ax.scatter(Xm[:, 0], Xm[:, 1], Xm[:, 2], c=true_labels, cmap='gist_rainbow')
ax.set_title('Missing data(30% missing values)')
ax = fig.add_subplot(223, projection='3d')
ax.scatter(X_gm[:, 0], X_gm[:, 1], X_gm[:, 2], c=true_labels, cmap='gist_rainbow')
ax.set_title('Imputed data using global mean values')
ax = fig.add_subplot(224, projection='3d')
ax.scatter(X_hat[:, 0], X_hat[:, 1], X_hat[:, 2], c=true_labels, cmap='gist_rainbow')
ax.set_title('Imputed data using cluster algorithm')
from sklearn.metrics import adjusted_mutual_info_score
cls = KMeans(n_clusters=5, n_jobs=-1)
true_labels = cls.fit_predict(X)
global_labels = cls.fit_predict(X_gm)
kmeans_labels = cls.fit_predict(X_hat)
print '全局平均值填充后的实际labels 与 聚类后的 labels 的互信息量:', adjusted_mutual_info_score(true_labels, global_labels)
print 'KMeans填充后的实际labels 与 聚类后的 labels 的互信息量:', adjusted_mutual_info_score(true_labels, kmeans_labels)
fraction = np.arange(0.0, 1.0, 0.05)
n_repeat = 10
scores = np.empty((2, fraction.shape[0], n_repeat))
for i, frac in enumerate(fraction):
for j in range(n_repeat):
X, true_labels, Xm = make_fake_data(fraction_missing=frac, n_clusters=5)
labels, centroids, X_hat, _ = kmeans_missing(Xm, n_clusters=5)
any_missing = np.any(~np.isfinite(Xm), 1)
scores[0, i, j] = adjusted_mutual_info_score(labels, true_labels)
scores[1, i, j] = adjusted_mutual_info_score(labels[any_missing],
true_labels[any_missing])
fig, ax = plt.subplots(1, 1)
scores_all, scores_missing = scores
ax.errorbar(fraction * 100, scores_all.mean(-1),
yerr=scores_all.std(-1), label='All labels')
ax.errorbar(fraction * 100, scores_missing.mean(-1),
yerr=scores_missing.std(-1),
label='Labels with missing values')
ax.set_xlabel('% missing values')
ax.set_ylabel('Adjusted mutual information')
ax.legend(loc='best', frameon=False)
ax.set_ylim(0, 1)
ax.set_xlim(-5, 100)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Filling up missing data with cluster algorithm
Step2: Example with fake data
Step3: 可以看出,采用全局平均值填充缺失值后,数据分布差别较大;相反采用聚类算法填充缺失值的效果较好。
Step4: 可以看出:
|
7,858
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
from __future__ import print_function
import numpy as np
import SDSS
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import copy
# We want to select galaxies, and then are only interested in their positions on the sky.
data = pd.read_csv("downloads/SDSSobjects.csv",usecols=['ra','dec','u','g',\
'r','i','size'])
# Filter out objects with bad magnitude or size measurements:
data = data[(data['u'] > 0) & (data['g'] > 0) & (data['r'] > 0) & (data['i'] > 0) & (data['size'] > 0)]
# Make size cuts, to exclude stars and nearby galaxies, and magnitude cuts, to get good galaxy detections:
data = data[(data['size'] > 0.8) & (data['size'] < 10.0) & (data['i'] > 17) & (data['i'] < 22)]
# Drop the things we're not so interested in:
del data['u'], data['g'], data['r'], data['i'],data['size']
data.head()
Ngals = len(data)
ramin,ramax = np.min(data['ra']),np.max(data['ra'])
decmin,decmax = np.min(data['dec']),np.max(data['dec'])
print (Ngals,"galaxy-like objects in (ra,dec) range (",ramin,":",ramax,",",decmin,":",decmax,")")
#!pip install --upgrade TreeCorr
random = pd.DataFrame({'ra' : ramin + (ramax-ramin)*np.random.rand(Ngals), 'dec' : (180./np.pi)*np.arcsin(np.random.uniform(np.sin(decmin*np.pi/180.0), np.sin(decmax*np.pi/180.),Ngals))})
print (len(random), type(random))
fig, ax = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(15, 6)
plt.subplots_adjust(wspace=0.2)
random.plot(kind='scatter', x='ra', y='dec', ax=ax[0], title='Random')
ax[0].set_xlabel('RA / deg')
ax[0].set_ylabel('Dec. / deg')
data.plot(kind='scatter', x='ra', y='dec', ax=ax[1], title='Data')
ax[1].set_xlabel('RA / deg')
ax[1].set_ylabel('Dec. / deg')
import treecorr
random_cat = treecorr.Catalog(ra=random['ra'], dec=random['dec'], ra_units='deg', dec_units='deg')
data_cat = treecorr.Catalog(ra=data['ra'], dec=data['dec'], ra_units='deg', dec_units='deg')
# Set up some correlation function estimator objects:
sep_units='arcmin'
min_sep=0.5
max_sep=10.0
N = 7
bin_size = np.log10(1.0*max_sep/min_sep)/(1.0*N)
dd = treecorr.NNCorrelation(bin_size=bin_size, min_sep=min_sep, max_sep=max_sep, sep_units=sep_units, bin_slop=0.05/bin_size)
rr = treecorr.NNCorrelation(bin_size=bin_size, min_sep=min_sep, max_sep=max_sep, sep_units=sep_units, bin_slop=0.05/bin_size)
# Process the data:
dd.process(data_cat)
rr.process(random_cat)
# Combine into a correlation function and its variance:
xi, varxi = dd.calculateXi(rr)
plt.figure(figsize=(15,8))
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
plt.errorbar(np.exp(dd.logr),xi,np.sqrt(varxi),c='blue',linewidth=2)
# plt.xscale('log')
plt.xlabel('$\\theta / {\\rm arcmin}$',fontsize=20)
plt.ylabel('$\\xi(\\theta)$',fontsize=20)
plt.ylim([-0.1,0.2])
plt.grid(True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Correlation Function
Step2: Random Catalogs
Step3: Now let's plot both catalogs, and compare.
Step4: Estimating $\xi(\theta)$
|
7,859
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
# Setup Seaborn
sns.set_style("whitegrid")
sns.set_context("poster")
df_offers = pd.read_excel("./WineKMC.xlsx", sheetname=0)
df_offers.columns = ["offer_id", "campaign", "varietal", "min_qty", "discount", "origin", "past_peak"]
df_offers.head()
df_transactions = pd.read_excel("./WineKMC.xlsx", sheetname=1)
df_transactions.columns = ["customer_name", "offer_id"]
df_transactions['n'] = 1
df_transactions.head()
#your turn
# merge the dataframes based on offer id
df_merged = pd.merge(df_transactions, df_offers, on='offer_id')
# create a matrix of customer name and offer id. Replace NaN values with zero and reset index to offer id rather than customer
x_cols = pd.pivot_table(df_merged, values='n', index=['customer_name'], columns=['offer_id']).fillna(0).reset_index()
# create dataframe without customer name
X = x_cols[x_cols.columns[1:]]
#your turn
from scipy.spatial.distance import cdist, pdist
from sklearn.cluster import KMeans
import numpy as np
# get Kmean and centroids
K = range(2, 11)
KM = [KMeans(n_clusters=k).fit(X) for k in K]
centroids = [k.cluster_centers_ for k in KM]
# compute euclidean distance
D_k = [cdist(X, mid, 'euclidean') for mid in centroids]
cIdx = [np.argmin(D,axis=1) for D in D_k]
dist = [np.min(D,axis=1) for D in D_k]
# Total with-in sum of square
tss = [sum(d**2) for d in dist]
# Construct a plot showing SSSS for each KK
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([1, 11])
ax.plot(K, tss, 'b*-')
ax.plot(K[6], tss[6], marker='o', markersize=12,
markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Within-cluster sum of squares')
plt.title('Elbow for KMeans clustering')
# setup KMEans for cluster = 8
cluster = KMeans(n_clusters=8)
# predict and assign to a cluster
x_cols['cluster'] = cluster.fit_predict(X)
y = x_cols.cluster.value_counts()
# index number is the cluster number
cluster = y.index.values
x_lim = np.arange(len(y))
# plot bar chart
plt.bar(x_lim, y, align='center', alpha=0.5)
plt.xticks(x_lim, cluster)
plt.ylabel('Counts')
plt.title('Number of points per cluster')
plt.show()
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
# Your turn.
from sklearn.metrics import silhouette_samples, silhouette_score
df_sil=[]
for n_clusters in range(2,10):
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
# add data to the list
df_sil.append([n_clusters, silhouette_avg])
# convert into a dataframe
df_sil=pd.DataFrame(df_sil, columns=['cluster', 'avg_score'])
# index number is the cluster number
cluster = df_sil.cluster
x_lim = np.arange(len(df_sil))
y= df_sil.avg_score
# plot bar chart
plt.bar(x_lim, y, align='center', alpha=0.5)
plt.xticks(x_lim, cluster)
plt.ylabel('silhoutte score')
plt.title('Silhoutte score per cluster')
plt.show()
#your turn
from sklearn.decomposition import PCA
cluster = KMeans(n_clusters=5)
x_cols['cluster'] = cluster.fit_predict(x_cols[x_cols.columns[1:]])
pca = PCA(n_components=2)
x_cols['x'] = pca.fit_transform(x_cols[x_cols.columns[1:]])[:,0]
x_cols['y'] = pca.fit_transform(x_cols[x_cols.columns[1:]])[:,1]
customer_clusters = x_cols[['customer_name', 'cluster', 'x', 'y']]
df = pd.merge(df_transactions, customer_clusters)
df = pd.merge(df_offers, df)
sns.lmplot('x', 'y',
data=df,
fit_reg=False,
hue="cluster",
scatter_kws={"marker": "D",
"s": 100})
plt.title('Scatter plot of clustered data')
df['is_4'] = df.cluster==4
print(df.groupby("is_4")[['min_qty', 'discount']].mean())
df.groupby("is_4").varietal.value_counts()
#your turn
# Initialize a new PCA model with a default number of components.
from sklearn.decomposition import PCA
# Do the rest on your own :)
from sklearn import decomposition
pca = PCA()
pca.fit(X)
pca_ratio = (np.round(pca.explained_variance_, decimals=4)*100)
K = [1, 2]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([1, 3])
ax.plot(K, pca_ratio, 'b*-')
plt.grid(True)
plt.xlabel('Number of dimensions')
plt.ylabel('PCA Explained variance')
plt.title('Elbow for PCA explained variance')
# your turn
# Affinity propagation
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
af = AffinityPropagation().fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
# your turn
# Spectral Clustering
from sklearn import cluster
for n_clusters in range(2,3):
#n_clusters = 4
spectral = cluster.SpectralClustering(n_clusters=n_clusters,
eigen_solver='arpack',
affinity="nearest_neighbors")
spectral.fit(X)
labels = spectral.labels_
print('Assigned number of clusters: %d' % n_clusters)
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
plt.scatter(X[:, 0], X[:, 1], c=spectral.labels_, cmap=plt.cm.spectral)
plt.title('Assigned number of clusters: %d' % n_clusters)
# AgglomerativeClustering
from sklearn.cluster import AgglomerativeClustering
for n_clusters in range(2,3):
#n_clusters = 4
linkage= 'ward'
model = AgglomerativeClustering(n_clusters=n_clusters)
model.fit(X)
labels = model.labels_
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
plt.scatter(X[:, 0], X[:, 1], c=model.labels_, cmap=plt.cm.spectral)
plt.title('linkage=%s' % (linkage), fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0, left=0, right=1)
plt.suptitle('n_cluster=%i' % (n_clusters), size=17)
plt.show()
# Your turn
# Using DBSCAN
from sklearn.cluster import DBSCAN
from sklearn import metrics
for eps in [.6]:
db = DBSCAN(eps=eps).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, labels))
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
for k, col in list(zip(unique_labels, colors)):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: We see that the first dataset contains information about each offer such as the month it is in effect and several attributes about the wine that the offer refers to
Step3: Data wrangling
Step4: K-Means Clustering
Step5: Choosing K
Step6: Choosing $K$
Step7: What we've done is we've taken those columns of 0/1 indicator variables, and we've transformed them into a 2-D dataset. We took one column and arbitrarily called it x and then called the other y. Now we can throw each point into a scatterplot. We color coded each point based on it's cluster so it's easier to see them.
Step8: Other Clustering Algorithms
|
7,860
|
<ASSISTANT_TASK:>
Python Code:
F=graphviz.Graph()#(engine='neato')
F.graph_attr['rankdir'] = 'LR'
F.edge('A_1','B_1')
F.edge('A_1','B_2')
F.edge('A_2','B_1')
F.edge('A_3','B_1')
F.edge('A_4','B_2')
F.edge('A_5','B_2')
F.edge('A_5','B_3')
F
F=graphviz.Graph()
F.graph_attr['rankdir'] = 'LR'
F.edge('A_1, A_2, A_3','B_1')
F.edge('A_1, A_2, A_3','B_1')
F.edge('A_1, A_2, A_3','B_1')
F.edge('A_1, A_2, A_3','B_2, B_3')
F.edge('A_4, A_5','B_2, B_3')
F.edge('A_4, A_5','B_2, B_3')
F
with open("population_config.json","r") as configfilex:
cs=json.load(configfilex)
population=makePopulation(100,["something","age","postcode"],
pop_crude,
c1=cs["c1"],
c2=cs["c2"],
c3=cs["c3"])
print (population.keys())
n_clusters=len(set(population["cluster_label"]))
print (n_clusters)
fittable=np.array([population["something"],
population["postcode"],
population["age"]]).T
alg=AgglomerativeClustering(n_clusters=n_clusters, linkage='ward')
alg.fit(fittable)
clusterlabels=list(zip(alg.labels_,population["cluster_label"]))
print("Outs:{}".format(clusterlabels))
#ninja visualizing hamming distance using html
with open("htmlcolors.json","r") as colorfile:
colors=json.load(colorfile)
#sample "nclusters" colors from that list, use them for visulaizing
colors=list(colors.keys())
colors=np.random.choice(colors,size=n_clusters)
print(colors)
colormatch={label:colors[indx] for indx,label in enumerate(population["cluster_label"])}
#todo: something like map(lambda x,y: colormatch[x]=y, (alg.labels_,colors))
print (colormatch)
cell="<td bgcolor={}>{}</td>"
row="<tr>{}</tr>"
table="<!DOCTYPE html><html><body><table>{}</table></body></html>"
#%% HTML
#<iframe width="100%" height "25%" src="outs/clabels.html"></iframe>
linkage_methods=['ward', 'average', 'complete']
aggl=lambda x: AgglomerativeClustering(n_clusters=n_clusters, linkage=x)
import webcolors
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The structure which we just described here happens to match a well-studied family of graphs known as Bipartite Graphs. There are tons of algorithms/ math tools that can be used to study such graphs. Lucky us!
Step2: Given some data we generate, we want to see what different algorithms tell us about
Step3: Agglomerative Clustering
Step4: Try other linkage methods in agglomerative
|
7,861
|
<ASSISTANT_TASK:>
Python Code:
# Configure Jupyter so figures appear in the notebook
%matplotlib inline

# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'

# import functions from the modsim.py module
from modsim import *

# Glucose/insulin measurements indexed by time (minutes).
data = pd.read_csv('data/glucose_insulin.csv', index_col='time');

# Interpolating function so insulin can be evaluated at any time.
I = interpolate(data.insulin)

# Parameters of the glucose minimal model: G0 is the initial glucose
# concentration; k1..k3 are rate constants.
params = Params(G0 = 290,
                k1 = 0.03,
                k2 = 0.02,
                k3 = 1e-05)
def make_system(params, data):
    """Make a System object for the glucose minimal model.

    params: sequence of G0, k1, k2, k3
    data: DataFrame with `glucose` and `insulin` columns
    returns: System object
    """
    # (The docstring above was present in the original but had lost its
    # triple quotes, which made the function body invalid Python.)
    G0, k1, k2, k3 = params

    # Basal (time-zero) glucose and insulin levels.
    Gb = data.glucose[0]
    Ib = data.insulin[0]

    # Interpolating function so insulin can be evaluated at any time.
    I = interpolate(data.insulin)

    t_0 = get_first_label(data)
    t_end = get_last_label(data)

    # Initial state: glucose at G0, remote insulin X at 0.
    init = State(G=G0, X=0)

    return System(params,
                  init=init, Gb=Gb, Ib=Ib, I=I,
                  t_0=t_0, t_end=t_end, dt=2)

system = make_system(params, data)
def update_func(state, t, system):
    """Update the glucose minimal model by one Euler step.

    state: State object with glucose G and remote insulin X
    t: time in min
    system: System object
    returns: State object one step of ``system.dt`` later
    """
    # (Docstring restored: the original text had lost its triple quotes,
    # leaving bare invalid statements in the body.)
    G, X = state
    k1, k2, k3 = system.k1, system.k2, system.k3
    I, Ib, Gb = system.I, system.Ib, system.Gb
    dt = system.dt

    # Minimal-model derivatives: glucose relaxes toward basal Gb and is
    # consumed in proportion to remote insulin X; X tracks the excess of
    # interpolated insulin I(t) over basal Ib.
    dGdt = -k1 * (G - Gb) - X*G
    dXdt = k3 * (I(t) - Ib) - k2 * X

    # Forward Euler step.
    G += dGdt * dt
    X += dXdt * dt

    return State(G=G, X=X)

update_func(system.init, system.t_0, system)
def run_simulation(system, update_func):
    """Run a fixed-step simulation of the system.

    system: System object with init, t_0, t_end, dt
    update_func: function that computes the next State from the current one
    returns: TimeFrame with one row per time step
    """
    # (Docstring restored: the original text had lost its triple quotes.)
    init = system.init
    t_0, t_end, dt = system.t_0, system.t_end, system.dt

    # One column per state variable; seed the first row with the
    # initial conditions.
    frame = TimeFrame(columns=init.index)
    frame.row[t_0] = init
    ts = linrange(t_0, t_end, dt)

    for t in ts:
        frame.row[t+dt] = update_func(frame.row[t], t, system)

    return frame

results = run_simulation(system, update_func);
results
# Plot simulated glucose against the measured data (top) and the
# inferred remote-insulin trajectory (bottom).
subplot(2, 1, 1)

plot(results.G, 'b-', label='simulation')
plot(data.glucose, 'bo', label='glucose data')
decorate(ylabel='Concentration (mg/dL)')

subplot(2, 1, 2)

plot(results.X, 'C1', label='remote insulin')

decorate(xlabel='Time (min)',
         ylabel='Concentration (arbitrary units)')

savefig('figs/chap18-fig01.pdf')
def slope_func(state, t, system):
    """Compute derivatives of the glucose minimal model.

    state: State object (or sequence) with glucose G and remote insulin X
    t: time in min
    system: System object with k1, k2, k3, I, Ib, Gb
    returns: tuple of derivatives (dGdt, dXdt)
    """
    # (Docstring restored: the original text had lost its triple quotes,
    # leaving bare invalid statements in the body.)
    G, X = state
    k1, k2, k3 = system.k1, system.k2, system.k3
    I, Ib, Gb = system.I, system.Ib, system.Gb

    # Glucose relaxes toward basal Gb and is consumed in proportion to
    # remote insulin X; X tracks excess insulin I(t) - Ib.
    dGdt = -k1 * (G - Gb) - X*G
    dXdt = k3 * (I(t) - Ib) - k2 * X

    return dGdt, dXdt
# Sanity-check the slope function at the initial conditions.
slope_func(system.init, 0, system)

# Solve the same model with the ODE solver instead of forward Euler.
results2, details = run_ode_solver(system, slope_func)
details

results2

# Compare the Euler simulation against the ODE solution.
plot(results.G, 'C0', label='run_simulation')
plot(results2.G, 'C2--', label='run_ode_solver')

decorate(xlabel='Time (min)', ylabel='Concentration (mg/dL)')

savefig('figs/chap18-fig02.pdf')

# Percent difference between the two glucose trajectories.
diff = results.G - results2.G
percent_diff = diff / results2.G * 100
percent_diff

max(abs(percent_diff))

# Solution goes here

# Solution goes here

# Solution goes here

# Solution goes here

source_code(run_ode_solver)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Code from the previous chapter
Step2: Interpolate the insulin data.
Step3: The glucose minimal model
Step5: Here's a version of make_system that takes the parameters and data
Step7: And here's the update function.
Step8: Before running the simulation, it is always a good idea to test the update function using the initial conditions. In this case we can verify that the results are at least qualitatively correct.
Step10: Now run_simulation is pretty much the same as it always is.
Step11: And here's how we run it.
Step12: The results are in a TimeFrame object with one column per state variable.
Step13: The following plot shows the results of the simulation along with the actual glucose data.
Step15: Numerical solution
Step16: We can test the slope function with the initial conditions.
Step17: Here's how we run the ODE solver.
Step18: details is a ModSimSeries object with information about how the solver worked.
Step19: results is a TimeFrame with one row for each time step and one column for each state variable
Step20: Plotting the results from run_simulation and run_ode_solver, we can see that they are not very different.
Step21: The differences in G are less than 2%.
Step22: Exercises
Step23: Under the hood
|
7,862
|
<ASSISTANT_TASK:>
Python Code:
df = pd.read_csv('../data/date_fixed_running_data.csv')
df.head()

# The index column was saved without a name, so it comes back as 'Unnamed: 0'.
df['Unnamed: 0'].head()

# Parse the date column either by name or by 0-based position.
df = pd.read_csv('../data/date_fixed_running_data.csv', parse_dates=['Date'])
df = pd.read_csv('../data/date_fixed_running_data.csv', parse_dates=[0])
df.head()

# Rename all columns at once...
cols = ['Date', 'Miles', 'Time']
df.columns = cols
df.head()

# ...or rename a single column in place.
df.rename(columns={df.columns[0]:'Date'}, inplace=True)
df.head()

# Use the dates as the index so the data plots as a time series.
df.set_index('Date', inplace=True)
df.plot()

# Example raw duration string ('H:MM:SS') from the Time column.
raw_time_fmt = df.Time[0]
def get_total_seconds(raw_time):
    """Convert an 'H:MM:SS' duration string to total seconds.

    raw_time: string like '1:02:03', or a missing value (NaN/None)
    returns: int total seconds, or NaN for missing input

    Missing values are passed through as NaN so pandas statistics
    (describe, mean, ...) keep ignoring them.
    """
    # pd.isna() is the robust missing-value check: the original
    # `raw_time is np.nan` identity test only matches the np.nan
    # singleton and misses None and other float NaNs.
    if pd.isna(raw_time):
        return np.nan  # if it's blank, keep it blank
    hrs, mins, seconds = str(raw_time).split(':')
    return int(seconds) + 60 * int(mins) + 60 * 60 * int(hrs)
print(get_total_seconds(raw_time_fmt))

# Total duration of each run in seconds (NaN where no time was recorded).
df['Seconds'] = df.Time.map(get_total_seconds)
df.head(10)

# describe() skips NaN values, so only recorded runs enter the stats.
df.describe()

# Filling missing values with zeros would drag the statistics down instead.
df.fillna(0).describe()

# Minutes are easier to reason about than seconds.
df['Minutes'] = df['Seconds'].map(lambda x: x / 60)
df.describe()

df.plot(x='Miles', y='Minutes', kind='scatter')

# Pairwise correlations: Pearson (default), Kendall tau and Spearman rank.
df.corr()
df.corr(method='kendall')
df.corr(method='spearman')

df.boxplot('Minutes', return_type='axes')

# Pace in minutes per mile.
df['Minutes'] / df['Miles']
df['Min_per_mile'] = df['Minutes'] / df['Miles']
df.describe()

df.plot(x='Miles', y='Min_per_mile', kind='scatter')
plt.ylabel("Minutes / Mile")

# Pace distribution, with progressively finer binning and styling.
df.hist('Min_per_mile')
df.hist('Min_per_mile',bins=20)
df.hist('Min_per_mile',bins=20, figsize=(10,8))
plt.xlim((5, 11))
plt.ylim((0, 12))
plt.title("Minutes Per Mile Histogram")
plt.grid(False)
plt.savefig('../assets/minutes_per_mile_histogram.png')

df['Miles']
df['Miles'].plot()

# Rolling 7-run statistics overlaid on the raw mileage.
# NOTE(review): the pd.rolling_* functions were removed in pandas 0.18+;
# the modern equivalent is df['Miles'].rolling(7).mean() etc.
df['Miles'].plot()
pd.rolling_mean(df['Miles'], 7).plot()

df['Miles'].plot()
pd.rolling_std(df['Miles'], 7).plot()

df['Miles'].plot()
pd.rolling_sum(df['Miles'], 7).plot()

df.index

# A DatetimeIndex supports natural partial-string slicing...
df['2014-11':'2014-12']
df['2014-11':'2015-1-1']['Miles'].plot()

# ...but looking up a single date requires .loc.
df['2014-8-12']
df.loc['2014-8-12']

df.head()

# Persist the cleaned data in several formats.
df.to_csv('../data/date_fixed_running_data_with_time.csv')
df.to_html('../data/date_fixed_running_data_with_time.html')
df.to_json('../data/date_fixed_running_data_with_time.json')

# JSON wants unique keys, so move the dates back into a column and
# re-index with plain integers before saving again.
df.reset_index()
df['Date'] = df.index
df.index = range(df.shape[0])
df.head()

df.to_json('../data/date_fixed_running_data_with_time.json')
df.Date[0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is because when you save a data frame to a csv it doesn’t label the index column. So now our column is actually the ‘zero’ column. When you use parse_dates you can use either the column name (if available) or the 0-based column index number. This happened because the index didn’t have a name when we saved the last csv.
Step2: We can import the date correctly by specifying the zero based look up or by specifying the name like we did in the last video.
Step3: Let’s go ahead and rename the columns and set our index to the dates again.
Step4: We can also rename the column using the rename method.
Step5: Now we can plot it again once we set the index.
Step6: Now one thing we’re going to want to do is get a breakdown of times for each run. Stats like minutes per mile and that sort of thing.
Step7: Now we can describe and see I recorded times for 52 of the runs.
Step8: This is a great example of what np.NaN is so great in pandas. When we do this describe we get answers unlike in numpy and it just disregards the empty values. If we were to fill it in with zeros that would drag down our statistics.
Step9: Now that we have seconds it would be useful to see minutes too. I find it hard to think in seconds.
Step10: Now let’s do a describe and plot it again.
Step11: Let’s plot Miles and Minutes together in a scatter plot. Wow, that’s linear. Let’s see how correlated they are. We do this with the corr method. We can see that miles and time are very tightly correlated (using the Pearson standard correlation coefficient). There are two other correlation methods that you can use: Kendall tau and Spearman rank correlation.
Step12: Now let’s see a box plot. With these two we get a much better idea of the data. We can see that most of my runs are below an hour except for a couple that are much longer.-
Step13: Now let’s add minutes per mile, we can just divide our two series to get those numbers.
Step14: We can see that along more shorter distances, my speed can vary a lot.
Step15: Let’s see a histogram of my speeds.
Step16: seems pretty center in that 7 minutes to 7.5 minute range. Let’s see if we can get more information with more bins which we specify with the bin argument.
Step17: That’s interesting. Under 7 and then at 7.5 are the most popular. I bet that has something to do with my running distances too or the courses I choose to run.
Step18: Now another cool thing you can do with time series is see the rolling mean or rolling sum or even rolling correlations. There’s a lot of different “rolling” type things you can do.
Step19: So here’s a standard plot of our Miles again, just a line over time. To add another line to the same plot we just add more details to the box. As I was touching on the rolling values. Let’s talk about the rolling average. Now to do that I pass it a series or a data frame.
Step20: I can do the same with the rolling standard deviation or sum.
Step21: Now on the last note one thing that’s cool about date time indexes is that you can query them very naturally. If I want to get all my runs in october of 2014, I just enter that as a string.
Step22: If I want to get from November to December, I can do that as a Series.
Step23: How do you think we might go from october to January 1 2015?
Step24: Now we can specify a series this way but we can’t specific a specific date. To get a specific date’s run.
Step25: To do that we need to use loc.
Step26: now that we’ve done all this work. We should save it so that we don’t have to remember what our operations were or what stage we did them at. Now we could save it to csv like we did our other one but I wanted to illustrate all the different ways you can save this file.
Step27: One thing to note with JSON files is that they want unique indexes (because they're going to be come the keys), so we've got to give it a new index. We can do this by resetting our index or setting our index to a column.
Step28: Now there’s a LOT more you can do with date time indexing but this is about all that I wanted to cover in this video. We will get into more specifics later. By now you should be getting a lot more familiar with pandas and what the ipython + pandas workflow is.
|
7,863
|
<ASSISTANT_TASK:>
Python Code:
import toytree

# Build a tree from a newick string and draw it with default styling.
newick = "((a,b),(c, d));"
tre = toytree.tree(newick)
tre.draw();

# toytree.tree() also accepts a URL, e.g. a tree downloaded from TreeBASE.
URL = "https://treebase.org/treebase-web/search/downloadATree.html?id=11298&treeid=31264"
tre = toytree.tree(URL)
tre.draw(tip_labels_align=True, height=800, width=600);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Newick tree files
Step2: An example using a URL from treebase
|
7,864
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib
import matplotlib.pyplot as mplt
from scipy import linalg
from scipy import io
### Ordinary Least Squares
### Solves a 2-class least-squares classification problem.

### LOAD DATA ###
### If LoadClasses is True, load the two classes from .mat files;
### otherwise, randomly generate Gaussian data.
LoadClasses = True
TrainOutliers = False
TestOutliers = False
NOut = 20
NSampsClass = 200
NSamps = 2*NSampsClass

if LoadClasses:

    ### File options:
    ### LinSepC1, LinSepC2, LinSepC2Outlier (still linearly separable)
    ### NonLinSepC1, NonLinSepC2, NonLinSepC22

    ## You will need to update these filepaths for your machine:
    InFile1 = '/Users/olearyd/Git/data/RSDI2017-Data-SpecClass/NonLinSepC1.mat'
    InFile2 = '/Users/olearyd/Git/data/RSDI2017-Data-SpecClass/NonLinSepC2.mat'
    C1Dict = io.loadmat(InFile1)
    C2Dict = io.loadmat(InFile2)
    C1 = C1Dict['NonLinSepC1']
    C2 = C2Dict['NonLinSepC2']

    if TrainOutliers:
        ### Optionally inject uniform-random outliers into both classes.
        Out1 = 2*np.random.rand(NOut,2)-0.5
        Out2 = 2*np.random.rand(NOut,2)-0.5
        C1 = np.concatenate((C1,Out1),axis=0)
        C2 = np.concatenate((C2,Out2),axis=0)
        NSampsClass = NSampsClass+NOut
        NSamps = 2*NSampsClass
else:
    ### Randomly generate two Gaussian classes:
    ### build a covariance from a diagonal matrix and a 45-degree rotation.
    pi = 3.141592653589793
    Lambda1 = 0.25
    Lambda2 = 0.05
    DiagMat = np.array([[Lambda1, 0.0],[0.0, Lambda2]])
    RotMat = np.array([[np.sin(pi/4), np.cos(pi/4)], [-np.cos(pi/4), np.sin(pi/4)]])
    mu1 = np.array([0,0])
    mu2 = np.array([1,1])
    Sigma = np.dot(np.dot(RotMat.T, DiagMat), RotMat)
    C1 = np.random.multivariate_normal(mu1, Sigma, NSampsClass)
    C2 = np.random.multivariate_normal(mu2, Sigma, NSampsClass)
    print(Sigma)

print(C1.shape)
print(C2.shape)
### PLOT DATA ###
matplotlib.pyplot.figure(1)
matplotlib.pyplot.plot(C1[:NSampsClass, 0], C1[:NSampsClass, 1], 'bo')
matplotlib.pyplot.plot(C2[:NSampsClass, 0], C2[:NSampsClass, 1], 'ro')
matplotlib.pyplot.show()

### SET UP TARGET OUTPUTS: +1 for class 1, -1 for class 2 ###
TargetOutputs = np.ones((NSamps,1))
TargetOutputs[NSampsClass:NSamps] = -TargetOutputs[NSampsClass:NSamps]

### PLOT TARGET OUTPUTS ###
matplotlib.pyplot.figure(2)
matplotlib.pyplot.plot(range(NSampsClass), TargetOutputs[range(NSampsClass)], 'b-')
matplotlib.pyplot.plot(range(NSampsClass, NSamps), TargetOutputs[range(NSampsClass, NSamps)], 'r-')
matplotlib.pyplot.show()

### FIND LEAST SQUARES SOLUTION ###
# Stack both classes and append a constant-1 column for the bias term,
# then solve w = pinv(X) @ t.
# NOTE(review): scipy.linalg.pinv2 is deprecated/removed in recent SciPy;
# scipy.linalg.pinv is the modern equivalent.
AllSamps = np.concatenate((C1,C2),axis=0)
AllSampsBias = np.concatenate((AllSamps, np.ones((NSamps,1))), axis=1)
Pseudo = linalg.pinv2(AllSampsBias)
w = Pseudo.dot(TargetOutputs)
w

### COMPUTE OUTPUTS ON TRAINING DATA ###
y = AllSampsBias.dot(w)

### PLOT OUTPUTS FROM TRAINING DATA ###
matplotlib.pyplot.figure(3)
matplotlib.pyplot.plot(range(NSamps), y, 'm')
matplotlib.pyplot.plot(range(NSamps),np.zeros((NSamps,1)), 'b')
matplotlib.pyplot.plot(range(NSamps), TargetOutputs, 'k')
matplotlib.pyplot.title('TrainingOutputs (Magenta) vs Desired Outputs (Black)')
matplotlib.pyplot.show()

### CALCULATE AND PLOT LINEAR DISCRIMINANT ###
# Boundary where w[0]*x + w[1]*y + w[2] = 0.
# NOTE(review): solving for the vertical coordinate would give slope
# -w[0]/w[1]; here -w[1]/w[0] is used — confirm which axis is intended.
Slope = -w[1]/w[0]
Intercept = -w[2]/w[0]
Domain = np.linspace(-1.1, 1.1, 60) # decision-surface domain: -1.1 to 1.1 (from the data), 60 points
Disc = Slope*Domain+Intercept

matplotlib.pyplot.figure(4)
matplotlib.pyplot.plot(C1[:NSampsClass, 0], C1[:NSampsClass, 1], 'bo')
matplotlib.pyplot.plot(C2[:NSampsClass, 0], C2[:NSampsClass, 1], 'ro')
matplotlib.pyplot.plot(Domain, Disc, 'k-')
matplotlib.pyplot.ylim([-1.1,1.3])
matplotlib.pyplot.title('Ordinary Least Squares')
matplotlib.pyplot.show()
### RIDGE REGRESSION ###
# Regularized least squares: w = (X'X + c*I)^-1 X't with c = 0.1.
RegConst = 0.1
AllSampsBias = np.concatenate((AllSamps, np.ones((NSamps,1))), axis=1)
AllSampsBiasT = AllSampsBias.T
XtX = AllSampsBiasT.dot(AllSampsBias)
AllSampsReg = XtX + RegConst*np.eye(3)
Pseudo = linalg.pinv2(AllSampsReg)
wr = Pseudo.dot(AllSampsBiasT.dot(TargetOutputs))

# Discriminant line for the regularized weights (same convention as above).
Slope = -wr[1]/wr[0]
Intercept = -wr[2]/wr[0]
Domain = np.linspace(-1.1, 1.1, 60)
Disc = Slope*Domain+Intercept

matplotlib.pyplot.figure(5)
matplotlib.pyplot.plot(C1[:NSampsClass, 0], C1[:NSampsClass, 1], 'bo')
matplotlib.pyplot.plot(C2[:NSampsClass, 0], C2[:NSampsClass, 1], 'ro')
matplotlib.pyplot.plot(Domain, Disc, 'k-')
matplotlib.pyplot.ylim([-1.1,1.3])
matplotlib.pyplot.title('Ridge Regression')
matplotlib.pyplot.show()

### COMPUTE OUTPUTS ON TRAINING DATA ###
yr = AllSampsBias.dot(wr)

### PLOT OUTPUTS FROM TRAINING DATA ###
matplotlib.pyplot.figure(6)
matplotlib.pyplot.plot(range(NSamps), yr, 'm')
matplotlib.pyplot.plot(range(NSamps),np.zeros((NSamps,1)), 'b')
matplotlib.pyplot.plot(range(NSamps), TargetOutputs, 'k')
matplotlib.pyplot.title('TrainingOutputs (Magenta) vs Desired Outputs (Black)')
matplotlib.pyplot.show()
# Count correct classifications: class 1 should map to positive outputs,
# class 2 to negative outputs.
y1 = y[range(NSampsClass)]
y2 = y[range(NSampsClass, NSamps)]
Corr1 = np.sum([y1>0])
Corr2 = np.sum([y2<0])

y1r = yr[range(NSampsClass)]
y2r = yr[range(NSampsClass, NSamps)]
Corr1r = np.sum([y1r>0])
Corr2r = np.sum([y2r<0])

print('Result for Ordinary Least Squares')
CorrClassRate=(Corr1+Corr2)/NSamps
print(Corr1 + Corr2, 'Correctly Classified for a ', round(100*CorrClassRate), '% Correct Classification \n')

print('Result for Ridge Regression')
CorrClassRater=(Corr1r+Corr2r)/NSamps
print(Corr1r + Corr2r, 'Correctly Classified for a ', round(100*CorrClassRater), '% Correct Classification \n')

### Make Confusion Matrices ###
# Rows: true class; columns: predicted class (percent of each class).
NumClasses = 2;
Cm = np.zeros((NumClasses,NumClasses))
Cm[(0,0)] = Corr1/NSampsClass
Cm[(0,1)] = (NSampsClass-Corr1)/NSampsClass
Cm[(1,0)] = (NSampsClass-Corr2)/NSampsClass
Cm[(1,1)] = Corr2/NSampsClass
Cm = np.round(100*Cm)
print('Confusion Matrix for OLS Regression \n', Cm, '\n')

Cm = np.zeros((NumClasses,NumClasses))
Cm[(0,0)] = Corr1r/NSampsClass
Cm[(0,1)] = (NSampsClass-Corr1r)/NSampsClass
Cm[(1,0)] = (NSampsClass-Corr2r)/NSampsClass
Cm[(1,1)] = Corr2r/NSampsClass
Cm = np.round(100*Cm)
print('Confusion Matrix for Ridge Regression \n', Cm, '\n')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we read in the example data. Note that you will need to update the filepaths below to work on your machine.
Step2: Now we can plot the data.
Step3: Save this project with the name
|
7,865
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np

# Kaggle Boston-housing data; update data_path for your machine.
data_path = "C:/Users/Rishu/Desktop/dATA/boston/"
boston_data=pd.read_csv(data_path+'train.csv')
boston_data.info()
boston_data.head()

boston_data_test=pd.read_csv(data_path+'test.csv')
boston_data_test.head()

boston_data.describe()

import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)

# Distribution of the target (medv) with mean (solid) and median
# (dashed) reference lines.
sns.distplot(boston_data['medv'], rug=True, color="b")
plt.axvline(boston_data['medv'].mean(), color="b", linestyle='solid', linewidth=2)
plt.axvline(boston_data['medv'].median(), color="b", linestyle='dashed', linewidth=2)
plt.show()
print ("Mean Price value :",boston_data['medv'].mean())
print ("Standard Deviation:",boston_data['medv'].std())

# Correlation heatmap of candidate features against medv.
ax = plt.subplots(figsize = (14,7))
sns.heatmap(boston_data[['crim','indus','nox','rm','dis','rad','tax','ptratio','black','lstat','medv']].corr(),
            linecolor = 'white', square=True, annot=True)
plt.show()

# Joint distribution of lstat vs medv with a regression fit.
sns.jointplot(x='lstat', y='medv', data=boston_data, color="r", kind="reg")
plt.show()

# Pair plot of the features
sns.pairplot(boston_data[['indus','rm','ptratio','lstat','medv']])
plt.show()

# Distributions of the four candidate features (mean solid, median dashed).
fig = plt.figure(figsize=(14,7))
plt.subplot(2,2,1)
sns.distplot(boston_data['indus'], rug=True, color="b")
plt.axvline(boston_data['indus'].mean(), color="b", linestyle='solid', linewidth=2)
plt.axvline(boston_data['indus'].median(), color="b", linestyle='dashed', linewidth=2)
plt.subplot(2,2,2)
sns.distplot(boston_data['rm'], rug=True, color="r")
plt.axvline(boston_data['rm'].mean(), color="r", linestyle='solid', linewidth=2)
plt.axvline(boston_data['rm'].median(), color="r", linestyle='dashed', linewidth=2)
plt.subplot(2,2,3)
sns.distplot(boston_data['ptratio'], rug=True, color="g")
plt.axvline(boston_data['ptratio'].mean(), color="g", linestyle='solid', linewidth=2)
plt.axvline(boston_data['ptratio'].median(), color="g", linestyle='dashed', linewidth=2)
plt.subplot(2,2,4)
sns.distplot(boston_data['lstat'], rug=True, color="y")
plt.axvline(boston_data['lstat'].mean(), color="y", linestyle='solid', linewidth=2)
plt.axvline(boston_data['lstat'].median(), color="y", linestyle='dashed', linewidth=2)
plt.show()
# Same distributions after a log transform (rm left untransformed).
fig = plt.figure(figsize=(14,7))
plt.subplot(2,2,1)
sns.distplot(np.log(boston_data['indus']), rug=True, color="b")
plt.axvline(np.log(boston_data['indus']).mean(), color="b", linestyle='solid', linewidth=2)
plt.axvline(np.log(boston_data['indus']).median(), color="b", linestyle='dashed', linewidth=2)
plt.subplot(2,2,2)
sns.distplot(boston_data['rm'], rug=True, color="r")
plt.axvline(boston_data['rm'].mean(), color="r", linestyle='solid', linewidth=2)
plt.axvline(boston_data['rm'].median(), color="r", linestyle='dashed', linewidth=2)
plt.subplot(2,2,3)
sns.distplot(np.log(boston_data['ptratio']), rug=True, color="g")
plt.axvline(np.log(boston_data['ptratio']).mean(), color="g", linestyle='solid', linewidth=2)
plt.axvline(np.log(boston_data['ptratio']).median(), color="g", linestyle='dashed', linewidth=2)
plt.subplot(2,2,4)
sns.distplot(np.log(boston_data['lstat']), rug=True, color="y")
plt.axvline(np.log(boston_data['lstat']).mean(), color="y", linestyle='solid', linewidth=2)
plt.axvline(np.log(boston_data['lstat']).median(), color="y", linestyle='dashed', linewidth=2)
plt.show()

# Regression fits of the (log-transformed) features against medv.
fig = plt.figure(figsize=(14,7))
plt.subplot(2,2,1)
x = np.log(boston_data[['indus']])
sns.regplot(x=x, y="medv", data=boston_data, color="b")
plt.subplot(2,2,2)
x2 = boston_data[['rm']]
sns.regplot(x=x2, y="medv", data=boston_data, color="r")
plt.subplot(2,2,3)
x3 = np.log(boston_data[['ptratio']])
sns.regplot(x=x3, y="medv", data=boston_data, color="g")
plt.subplot(2,2,4)
x4 = np.log(boston_data[['lstat']])
sns.regplot(x=x4, y="medv", data=boston_data, color="y")
plt.show()

# Keep log(lstat) as a feature; the other log transforms are left
# commented out.
boston_data['lstat_log']=np.log(boston_data['lstat'])
boston_data_test['lstat_log_test']=np.log(boston_data_test['lstat'])
#boston_data['ptratio_log']=np.log(boston_data['ptratio'])
#boston_data_test['ptratio_log_test']=np.log(boston_data_test['ptratio'])
#boston_data['indus_log']=np.log(boston_data['indus'])
#boston_data_test['indus_log_test']=np.log(boston_data_test['indus'])

# Final feature set: rooms per dwelling and log(lstat); target is medv.
X = boston_data[['rm','lstat_log']]
X_bd_test=boston_data_test[['rm','lstat_log_test']]
y = boston_data[['medv']]

# Hold out a cross-validation split from the training data.
from sklearn.model_selection import train_test_split
X_train, X_cv, y_train, y_cv = train_test_split(X, y, random_state=0)
print(len(X_train), len(y_train), len(X_cv), len(y_cv))
from sklearn.tree import DecisionTreeRegressor

# Track the best cross-validation score and the depth that achieved it.
max_score = 0
max_depth = 0

def decision_tree(j):
    """Return a DecisionTreeRegressor of depth ``j`` fitted on X_train/y_train."""
    return DecisionTreeRegressor(random_state=0, max_depth=j).fit(X_train, y_train)

# Sweep depths 1..10 and score each tree on the cross-validation split.
for depth in range(1, 11):
    clf_score = decision_tree(depth).score(X_cv, y_cv)
    print("Decision Tree Regressor at max_depth:", depth, " scored: ", clf_score)
    if clf_score > max_score:
        max_score, max_depth = clf_score, depth

print("The maximum score is achieved at a depth of : ", max_depth, " with score of :", max_score)
# Refit at the best depth and inspect which features drive the splits.
dtr_clf = decision_tree(max_depth)
sns.barplot(X_train.columns, dtr_clf.feature_importances_)
plt.show()

# Render the fitted tree with graphviz.
from IPython.display import Image
import pydotplus
from sklearn.externals.six import StringIO
from sklearn import tree
dot_data = StringIO()
tree.export_graphviz(dtr_clf, out_file=dot_data,
                     feature_names=X_train.columns,
                     class_names="medv",
                     filled=True, rounded=True,
                     special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())

# Predict on the Kaggle test set and compare mean prices.
bd_predict = dtr_clf.predict(X_bd_test)
plt.scatter(boston_data_test['ID'],bd_predict)
plt.show()
print ("Mean Price value before modelling:",boston_data['medv'].mean())
print ("Mean Price value after modelling :",bd_predict.mean())

# Write the Kaggle submission file (ID, medv).
submission = pd.DataFrame({
    "ID": boston_data_test['ID'],
    "medv": bd_predict
})
submission.to_csv(data_path+'output.csv', index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the boston dataset - Train and Test
Step2: Understanding the distribution and relationship of the data
Step3: Plotting the target price value
Step4: From the above plot we can understand that the price of the houses ranges in an average price range of
Step5: Most co-related features
Step6: Now let us plot a distribution chart of the selected features. This would help us understand the distribution of the data a little better.
Step7: From the above dist plots we can conclude that the distribution of the data in 3 out of 4 features have skewed data distribution. The feature
Step8: After applying a logarithmic transform to the above data set, it seems that only LSTAT responds well and becomes normally distributed. PTRATIO and INDUS do not show any significant improvement from the normalization.
Step9: Building the data with the selected features
Step10: Splitting the train data for train and cross-validation datasets
Step11: Model Fitting
Step12: Selecting the max depth
Step13: Training the model with max_depth
Step14: Finding the importance of feature in the regression algorithm
Step15: We can conclude that rm and lstat are two of them most important factor in the prices of the house in boston area.
Step16: Create Final Submission (Kaggle only)
Step17: Generate the test dataframe as csv output
|
7,866
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pynucastro as pyrl

# Rate files for this network: the main CNO rates plus breakout rates
# into the hot CNO cycle.
files = ["c12-pg-n13-ls09",
         "c13-pg-n14-nacr",
         "n13--c13-wc12",
         "n13-pg-o14-lg06",
         "n14-pg-o15-im05",
         "n15-pa-c12-nacr",
         "o14--n14-wc12",
         "o15--n15-wc12",
         "o14-ap-f17-Ha96c",
         "f17-pg-ne18-cb09",
         "ne18--f18-wc12",
         "f18-pa-o15-il10"]
rc = pyrl.RateCollection(files)

# Evaluating rates needs a composition; start from solar-like abundances
# for all nuclei in the collection.
comp = pyrl.Composition(rc.get_nuclei())
comp.set_solar_like()

# Interactive explorer for the rate collection + composition.
re = pyrl.Explorer(rc, comp)
re.explore()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This collection of rates has the main CNO rates plus a breakout rate into the hot CNO cycle
Step2: To evaluate the rates, we need a composition. This is defined using a list of Nuceli objects.
Step3: Interactive exploration is enabled through the Explorer class, which takes a RateCollection and a Composition
|
7,867
|
<ASSISTANT_TASK:>
Python Code:
import logging
from conf import LisaLogging
LisaLogging.setup()

# Generate plots inline
%matplotlib inline

import os

# Support to access the remote target
import devlib
from env import TestEnv

# RTApp configurator for generation of PERIODIC tasks
from wlgen import RTA, Ramp

# Setup target configuration
my_conf = {

    # Target platform and board
    "platform"    : 'linux',
    "board"       : 'juno',
    "host"        : '192.168.0.1',

    # Folder where all the results will be collected
    "results_dir" : "EnergyMeter_HWMON",

    # Energy Meters Configuration for BayLibre's ACME Cape
    "emeter" : {
        "instrument" : "hwmon",
        "conf" : {
            # Prefixes of the HWMon labels
            'sites' : ['a53', 'a57'],
            # Type of hardware monitor to be used
            'kinds' : ['energy']
        },
        'channel_map' : {
            'LITTLE' : 'a53',
            'big' : 'a57',
        }
    },

    # Tools required by the experiments
    "tools" : [ 'trace-cmd', 'rt-app' ],

    # Comment this line to calibrate RTApp in your own platform
    "rtapp-calib" : {"0": 360, "1": 142, "2": 138, "3": 352, "4": 352, "5": 353},
}

# Initialize a test environment using:
te = TestEnv(my_conf, wipe=False, force_new=True)
target = te.target

# Create an RTApp RAMP task: duty cycle ramps from 60% down to 20%
# in 5% steps, 0.5 s per step.
rtapp = RTA(te.target, 'ramp', calibration=te.calibration())
rtapp.conf(kind='profile',
           params={
               'ramp' : Ramp(
                   start_pct = 60,
                   end_pct = 20,
                   delta_pct = 5,
                   time_s = 0.5).get()
           })

# EnergyMeter Start: reset the counters just before running the workload.
te.emeter.reset()

rtapp.run(out_dir=te.res_dir)

# EnergyMeter Stop and samples collection
nrg_report = te.emeter.report(te.res_dir)

logging.info("Collected data:")
!tree $te.res_dir

logging.info("Measured channels energy:")
logging.info("%s", nrg_report.channels)

logging.info("Generated energy file:")
logging.info("  %s", nrg_report.report_file)
!cat $nrg_report.report_file
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import required modules
Step2: Target Configuration
Step3: Workload Execution and Power Consumptions Samping
Step4: Power Measurements Data
|
7,868
|
<ASSISTANT_TASK:>
Python Code:
# Base library imports
from __future__ import print_function
from __future__ import division

import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import ipywidgets
%matplotlib inline

import os
import sys
# insert(0) so only the github checkout is used, avoiding version
# mismatches with a pip-installed abupy
sys.path.insert(0, os.path.abspath('../'))
import abupy

# Enable the sandbox data, so the data environment matches the book
abupy.env.enable_example_env_ipython()
from abupy import AbuFactorAtrNStop, AbuFactorPreAtrNStop, AbuFactorCloseAtrNStop, AbuFactorBuyBreak
from abupy import abu, EMarketTargetType, AbuMetricsBase, ABuMarketDrawing, ABuProgress, ABuSymbolPd
from abupy import EMarketTargetType, EDataCacheType, EMarketSourceType, EMarketDataFetchMode, EStoreAbu, AbuUmpMainMul
from abupy import AbuUmpMainDeg, AbuUmpMainJump, AbuUmpMainPrice, AbuUmpMainWave, feature, AbuFeatureDegExtend
from abupy import AbuUmpEdgeDeg, AbuUmpEdgePrice, AbuUmpEdgeWave, AbuUmpEdgeFull, AbuUmpEdgeMul, AbuUmpEegeDegExtend
from abupy import AbuUmpMainDegExtend, ump, Parallel, delayed, AbuMulPidProgress

# Disable the sandbox data
abupy.env.disable_example_env_ipython()

# Target the China A-share market and read only from the local cache.
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_CN
abupy.env.g_data_fetch_mode = EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL

# Load previously saved 5-fold backtest results for train and test sets.
abu_result_tuple = abu.load_abu_result_tuple(n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
                                             custom_name='train_cn')
abu_result_tuple_test = abu.load_abu_result_tuple(n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
                                                  custom_name='test_cn')
ABuProgress.clear_output()

# Show overall backtest metrics for both result sets.
print('训练集结果:')
metrics_train = AbuMetricsBase.show_general(*abu_result_tuple, returns_cmp=True ,only_info=True)
print('测试集结果:')
metrics_test = AbuMetricsBase.show_general(*abu_result_tuple_test, returns_cmp=True, only_info=True)
# Must be set globally to the A-share market: ump saves/loads its fitted
# judges keyed by the market type.
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_CN

# The four "main umpire" (veto) models, populated by load_main_ump().
ump_deg=None
ump_mul=None
ump_price=None
ump_main_deg_extend=None

# Train the main umpires on the training-set trade records.
orders_pd_train_cn = abu_result_tuple.orders_pd

def train_main_ump():
    """Fit and persist all four main umpires from the training trades."""
    print('AbuUmpMainDeg begin...')
    AbuUmpMainDeg.ump_main_clf_dump(orders_pd_train_cn, save_order=False, show_order=False)
    print('AbuUmpMainPrice begin...')
    AbuUmpMainPrice.ump_main_clf_dump(orders_pd_train_cn, save_order=False, show_order=False)
    print('AbuUmpMainMul begin...')
    AbuUmpMainMul.ump_main_clf_dump(orders_pd_train_cn, save_order=False, show_order=False)
    print('AbuUmpMainDegExtend begin...')
    AbuUmpMainDegExtend.ump_main_clf_dump(orders_pd_train_cn, save_order=False, show_order=False)
    # Still go through load_main_ump, to avoid copying large objects
    # into the worker processes below.
    load_main_ump()

def load_main_ump():
    """Load the four persisted main umpires in predict mode."""
    global ump_deg, ump_mul, ump_price, ump_main_deg_extend
    ump_deg = AbuUmpMainDeg(predict=True)
    ump_mul = AbuUmpMainMul(predict=True)
    ump_price = AbuUmpMainPrice(predict=True)
    ump_main_deg_extend = AbuUmpMainDegExtend(predict=True)
    print('load main ump complete!')

def select(select):
    # Widget callback: either retrain the umpires or just load them.
    if select == 'train main ump':
        train_main_ump()
    else:
        load_main_ump()

_ = ipywidgets.interact_manual(select, select=['train main ump', 'load main ump'])

# Keep only the test-set trades that actually completed (result != 0)
# and peek at their buy-time features.
order_has_result = abu_result_tuple_test.orders_pd[abu_result_tuple_test.orders_pd.result != 0]
order_has_result.filter(regex='^buy(_deg_|_price_|_wave_|_jump)').head()
def apply_ml_features_ump(order, predicter, progress, need_hit_cnt):
if not isinstance(order.ml_features, dict):
import ast
# 低版本pandas dict对象取出来会成为str
ml_features = ast.literal_eval(order.ml_features)
else:
ml_features = order.ml_features
progress.show()
# 将交易单中的买入时刻特征传递给ump主裁决策器,让每一个主裁来决策是否进行拦截
return predicter.predict_kwargs(need_hit_cnt=need_hit_cnt, **ml_features)
def pararllel_func(ump_object, ump_name):
    """Run one main umpire over every resulted order under a progress bar.

    Returns (ump_name, Series of per-order verdicts).
    """
    with AbuMulPidProgress(len(order_has_result),
                           '{} complete'.format(ump_name)) as progress:
        # Row-wise apply: each order asks the umpire for an intercept vote.
        verdicts = order_has_result.apply(apply_ml_features_ump, axis=1,
                                          args=(ump_object, progress, 2,))
        return ump_name, verdicts
if sys.version_info > (3, 4, 0):
    # Python >= 3.4: run the four main umpires in parallel, one process each.
    parallel = Parallel(
        n_jobs=4, verbose=0, pre_dispatch='2*n_jobs')
    out = parallel(delayed(pararllel_func)(ump_object, ump_name)
                   for ump_object, ump_name in zip([ump_deg, ump_mul, ump_price, ump_main_deg_extend],
                                                   ['ump_deg', 'ump_mul', 'ump_price', 'ump_main_deg_extend']))
else:
    # Below 3.4 the ump inner classes fail to pickle in child processes, so
    # fall back to running the umpires one by one in this process.
    out = [pararllel_func(ump_object, ump_name) for ump_object, ump_name in zip([ump_deg, ump_mul, ump_price, ump_main_deg_extend],
                                                                                ['ump_deg', 'ump_mul', 'ump_price', 'ump_main_deg_extend'])]
# Merge each umpire's intercept votes back onto the orders frame.
for sub_out in out:
    order_has_result[sub_out[0]] = sub_out[1]
block_pd = order_has_result.filter(regex='^ump_*')
# Sum the votes of all main umpires per order.
block_pd['sum_bk'] = block_pd.sum(axis=1)
block_pd['result'] = order_has_result['result']
# Any order with at least one blocking vote (1) is intercepted.
block_pd = block_pd[block_pd.sum_bk > 0]
print('四个裁判整体拦截正确率{:.2f}%'.format(block_pd[block_pd.result == -1].result.count() / block_pd.result.count() * 100))
block_pd.tail()
from sklearn import metrics
def sub_ump_show(block_name):
    """Return (intercept accuracy %, intercepted trade count) for one main
    umpire column of ``block_pd``.

    A blocked trade counts as a correct intercept when it really lost, so
    results are mapped -1 -> 1 (hit) and 1 -> 0 (miss).
    """
    sub_block_pd = block_pd[block_pd[block_name] == 1]
    # Build the hit flags in a fresh array instead of assigning back into the
    # filtered slice, which triggered pandas' SettingWithCopy behaviour in
    # the original.
    hits = np.where(sub_block_pd.result == -1, 1, 0)
    return metrics.accuracy_score(sub_block_pd[block_name], hits) * 100, len(hits)
# Per-umpire intercept accuracy and intercepted trade counts.
print('角度裁判拦截正确率{:.2f}%, 拦截交易数量{}'.format(*sub_ump_show('ump_deg')))
print('角度扩展裁判拦拦截正确率{:.2f}%, 拦截交易数量{}'.format(*sub_ump_show('ump_main_deg_extend')))
print('单混裁判拦截正确率{:.2f}%, 拦截交易数量{}'.format(*sub_ump_show('ump_mul')))
print('价格裁判拦截正确率{:.2f}%, 拦截交易数量{}'.format(*sub_ump_show('ump_price')))
# Edge umpires: make sure the market is CN A-shares (ump persists per market).
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_CN
# Fit each edge umpire on the training orders, then reload in predict mode.
print('AbuUmpEdgeDeg begin...')
AbuUmpEdgeDeg.ump_edge_clf_dump(orders_pd_train_cn)
edge_deg = AbuUmpEdgeDeg(predict=True)
print('AbuUmpEdgePrice begin...')
AbuUmpEdgePrice.ump_edge_clf_dump(orders_pd_train_cn)
edge_price = AbuUmpEdgePrice(predict=True)
print('AbuUmpEdgeMul begin...')
AbuUmpEdgeMul.ump_edge_clf_dump(orders_pd_train_cn)
edge_mul = AbuUmpEdgeMul(predict=True)
print('AbuUmpEegeDegExtend begin...')
AbuUmpEegeDegExtend.ump_edge_clf_dump(orders_pd_train_cn)
edge_deg_extend = AbuUmpEegeDegExtend(predict=True)
print('fit edge complete!')
def apply_ml_features_edge(order, predicter, progress):
    """Ask one edge umpire for its verdict on this order's buy-time features.

    Returns the raw enum ``value`` of the verdict.
    """
    features = order.ml_features
    if not isinstance(features, dict):
        import ast
        # Older pandas versions hand the stored dict back as its str repr.
        features = ast.literal_eval(features)
    progress.show()
    # The edge umpire classifies the buy-time features.
    verdict = predicter.predict(**features)
    return verdict.value
def edge_pararllel_func(edge, edge_name):
    """Run one edge umpire over every resulted order under a progress bar.

    Returns (edge_name, Series of per-order verdict values).
    """
    with AbuMulPidProgress(len(order_has_result),
                           '{} complete'.format(edge_name)) as progress:
        # Row-wise apply: each order asks the edge umpire for its verdict.
        verdicts = order_has_result.apply(apply_ml_features_edge, axis=1,
                                          args=(edge, progress,))
        return edge_name, verdicts
if sys.version_info > (3, 4, 0):
    # Python >= 3.4: run the four edge umpires in parallel, one process each.
    parallel = Parallel(
        n_jobs=4, verbose=0, pre_dispatch='2*n_jobs')
    out = parallel(delayed(edge_pararllel_func)(edge, edge_name)
                   for edge, edge_name in zip([edge_deg, edge_price, edge_mul, edge_deg_extend],
                                              ['edge_deg', 'edge_price', 'edge_mul', 'edge_deg_extend']))
else:
    # Below 3.4 the ump inner classes fail to pickle in child processes, so
    # process the edge umpires one by one instead.
    out = [edge_pararllel_func(edge, edge_name) for edge, edge_name in zip([edge_deg, edge_price, edge_mul, edge_deg_extend],
                                                                            ['edge_deg', 'edge_price', 'edge_mul', 'edge_deg_extend'])]
# Merge each edge umpire's verdicts back onto the orders frame.
for sub_out in out:
    order_has_result[sub_out[0]] = sub_out[1]
block_pd = order_has_result.filter(regex='^edge_*')
# In predict() output, 1 marks win_top; only loss_top matters here, so keep
# -1 and collapse everything else to 0. (These two explanation lines were a
# stray un-quoted docstring in the original and made the script a
# SyntaxError; they are comments now.)
block_pd['edge_block'] = \
    np.where(np.min(block_pd, axis=1) == -1, -1, 0)
# Attach the realized trade results.
block_pd['result'] = order_has_result['result']
# Keep only the orders some edge umpire judged loss_top (-1).
block_pd = block_pd[block_pd.edge_block == -1]
print('四个裁判整体拦截正确率{:.2f}%'.format(block_pd[block_pd.result == -1].result.count() /
                                    block_pd.result.count() * 100))
print('四个边裁拦截交易总数{}, 拦截率{:.2f}%'.format(
    block_pd.shape[0],
    block_pd.shape[0] / order_has_result.shape[0] * 100))
block_pd.head()
from sklearn import metrics
def sub_edge_show(edge_name):
    """Return (intercept accuracy %, intercepted trade count) for one edge
    umpire column of ``order_has_result``."""
    blocked = order_has_result[order_has_result[edge_name] == -1]
    accuracy = metrics.accuracy_score(blocked[edge_name], blocked.result) * 100
    return accuracy, blocked.shape[0]
# Per-edge-umpire intercept accuracy and intercepted trade counts.
print('角度边裁拦截正确率{0:.2f}%, 拦截交易数量{1:}'.format(*sub_edge_show('edge_deg')))
print('单混边裁拦截正确率{0:.2f}%, 拦截交易数量{1:}'.format(*sub_edge_show('edge_mul')))
print('价格边裁拦截正确率{0:.2f}%, 拦截交易数量{1:}'.format(*sub_edge_show('edge_price')))
print('角度扩展边裁拦截正确率{0:.2f}%, 拦截交易数量{1:}'.format(*sub_edge_show('edge_deg_extend')))
# Enable the built-in main umpires (deg and price).
abupy.env.g_enable_ump_main_deg_block = True
abupy.env.g_enable_ump_main_price_block = True
# Enable the built-in edge umpires (deg and price).
abupy.env.g_enable_ump_edge_deg_block = True
abupy.env.g_enable_ump_edge_price_block = True
# Umpires consume buy-time features, so feature generation must be on during
# the backtest.
abupy.env.g_enable_ml_feature = True
# Reuse the previously split test set for this backtest.
abupy.env.g_enable_last_split_test = True
abupy.beta.atr.g_atr_pos_base = 0.05
feature.clear_user_feature()
# Register AbuFeatureDegExtend (10/30/50/90/120-day fitted-angle features) as
# an extra viewpoint recorded during the backtest.
feature.append_user_feature(AbuFeatureDegExtend)
# Turn on user-defined umpires.
ump.manager.g_enable_user_ump = True
# Clear any previously registered user umpires first.
ump.manager.clear_user_ump()
# Register the custom edge umpire class.
ump.manager.append_user_ump(AbuUmpEegeDegExtend)
# Register the custom main umpire class.
ump.manager.append_user_ump(AbuUmpMainDegExtend)
# Initial capital: 5,000,000.
read_cash = 5000000
# Buy side: the same upward-breakout factors as before.
buy_factors = [{'xd': 60, 'class': AbuFactorBuyBreak},
               {'xd': 42, 'class': AbuFactorBuyBreak}]
# Sell side: same factors as the previous section.
sell_factors = [
    {'stop_loss_n': 1.0, 'stop_win_n': 3.0,
     'class': AbuFactorAtrNStop},
    {'class': AbuFactorPreAtrNStop, 'pre_atr_n': 1.5},
    {'class': AbuFactorCloseAtrNStop, 'close_atr_n': 1.5}
]
# CN market, force local data only. NOTE(review): these two settings are
# duplicated below — harmless but redundant.
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_CN
abupy.env.g_data_fetch_mode = EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_CN
abupy.env.g_data_fetch_mode = EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL
# Result holder for the umpire-enabled backtest.
abu_result_tuple_test_ump = None
def run_loop_back_ump():
    """Backtest the test period with umpires enabled, then persist the result."""
    global abu_result_tuple_test_ump
    result, _ = abu.run_loop_back(read_cash,
                                  buy_factors,
                                  sell_factors,
                                  choice_symbols=None,
                                  start='2012-08-08', end='2017-08-08')
    abu_result_tuple_test_ump = result
    # Store locally so later analysis can reload without re-running.
    abu.store_abu_result_tuple(abu_result_tuple_test_ump, n_folds=5,
                               store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
                               custom_name='test_ump_cn')
    ABuProgress.clear_output()
def run_load_ump():
    """Reload the previously stored umpire-enabled backtest result."""
    global abu_result_tuple_test_ump
    abu_result_tuple_test_ump = abu.load_abu_result_tuple(
        n_folds=5,
        store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
        custom_name='test_ump_cn')
def select_ump(select):
    """Dispatch the ipywidgets choice: re-run the umpire backtest or load
    the stored result."""
    action = run_loop_back_ump if select == 'run loop back ump' else run_load_ump
    action()
# Manual-trigger widget: run the umpire backtest or load stored results.
_ = ipywidgets.interact_manual(select_ump, select=['run loop back ump', 'load test ump data'])
# Compare metrics: umpire-enabled backtest vs. the plain test-set backtest.
AbuMetricsBase.show_general(*abu_result_tuple_test_ump, returns_cmp=True, only_info=True)
AbuMetricsBase.show_general(*abu_result_tuple_test, returns_cmp=True, only_info=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 下面读取上一节存储的训练集和测试集回测数据,如下所示:
Step2: 1. A股训练集主裁训练
Step3: 2. 验证A股主裁是否称职
Step4: order_has_result的交易单中记录了所买入时刻的交易特征,如下所示:
Step5: 可以通过一个一个迭代交易单,将交易单中的买入时刻特征传递给ump主裁决策器,让每一个主裁来决策是否进行拦截,这样可以统计每一个主裁的拦截成功率,以及整体拦截率等,如下所示:
Step6: 通过把所有主裁的决策进行相加, 如果有投票1的即会进行拦截,四个裁判整体拦截正确率统计:
Step7: 下面统计每一个主裁的拦截正确率:
Step8: 3. A股训练集边裁训练
Step9: 4. 验证A股边裁是否称职
Step11: 通过把所有边裁的决策进行统计, 如果有投票-1的结果即判定loss_top的拿出来和真实交易结果result组成结果集,统计四个边裁的整体拦截正确率以及拦截率,如下所示:
Step12: 下面再统计每一个 边裁的拦截正确率:
Step13: 4. 在abu系统中开启主裁拦截模式,开启边裁拦截模式
Step14: 用户自定义裁判的开启在‘第18节 自定义裁判决策交易‘ 也示例过,通过ump.manager.append_user_ump即可
Step15: 买入因子,卖出因子等依然使用相同的设置,如下所示:
Step16: 完成裁判组合的开启,即可开始回测,回测操作流程和之前的操作一样:
Step17: 下面对比针对A股市场测试集交易开启主裁,边裁拦截和未开启主裁,边裁,结果可以看出拦截了接近一半的交易,胜率以及盈亏比都有大幅度提高:
|
7,869
|
<ASSISTANT_TASK:>
Python Code:
######################################
###THIS IS PSEUDOCODE; WILL NOT RUN###
######################################

###STEP 1: COMPUTATION OF FOREGROUND PROBABILITY###
cdfMapVolume = []
for image in volume:
    # Get a distribution of intensities of the slice
    dist = generateDistribution(image)
    # Get the cdf for every voxel in the image slice
    cdfMap = zeros_like(dist)
    for y in image.height:
        for x in image.width:
            cdfMap[y][x] = dist.cdf(image[y][x])
    # (typo fix: was "apend")
    cdfMapVolume.append(cdfMap)

####Step 2: Probability of 2D Puncta
filteredVolume = zeros_like(cdfMapVolume)
for z in cdfMapVolume.depth:
    for y in cdfMapVolume.height:
        for x in cdfMapVolume.width:
            # NOTE: boxFilter applies a box filter of the given size at zyx.
            # It is the product of all elements within `size` of the center
            # that lie on the same z slice.
            filteredVolume[z][y][x] = boxFilter(cdfMapVolume, z, y, x, size)

####Step 3: Probability of 3D Puncta
finalVolume = zeros_like(filteredVolume)
for z in filteredVolume.depth:
    for y in filteredVolume.height:
        for x in filteredVolume.width:
            # getSquaredError sums the squared error between this voxel and
            # the same yx position within `size` slices up/down the z axis.
            finalVolume[z][y][x] = filteredVolume[z][y][x] * exp(-1 * getSquaredError(filteredVolume, z, y, x, size))
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
import sys
sys.path.insert(0,'../code/functions/')
# Load the synthetic test volumes. NOTE(review): Python-3 pickle would need
# mode 'rb' here; this notebook appears to target Python 2 (later cells use
# print statements).
twoClusterDat = pickle.load(open('../code/tests/synthDat/twoPerfectClusters.synth'))
all100Dat = pickle.load(open('../code/tests/synthDat/all100.synth'))
# The two cluster data
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
z, y, x = twoClusterDat.nonzero()
ax.scatter(x, y, z, zdir='z', c='r')
plt.show()
# The uniform data
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
z, y, x = all100Dat.nonzero()
ax.scatter(x, y, z, zdir='z', c='r')
plt.show()
# Get the implemented library code
import plosLib as pLib
import connectLib as cLib
# Run the puncta pipeline on the two-cluster volume, binarize via Otsu.
twoClusterResults = pLib.pipeline(twoClusterDat)
bianTwoClusterResults = cLib.otsuVox(twoClusterResults)
# The binary two cluster results
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
z, y, x = bianTwoClusterResults.nonzero()
ax.scatter(x, y, z, zdir='z', c='r')
plt.show()
# Same pipeline on the uniform volume.
uniformResults = pLib.pipeline(all100Dat)
bianUniformResults = cLib.otsuVox(uniformResults)
# NOTE(review): this plot uses `uniformResults` (pre-threshold), not
# `bianUniformResults` — confirm whether that is intentional.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
z, y, x = uniformResults.nonzero()
ax.scatter(x, y, z, zdir='z', c='r')
plt.show()
from random import randrange as rand
from skimage.measure import label
def generatePointSet():
    """Return the voxel coordinates of a 3x3x3 cube centered at a random
    point, keeping only coordinates inside the 100^3 volume."""
    center = (rand(0, 99), rand(0, 99), rand(0, 99))
    points = []
    for dz in range(-1, 2):
        for dy in range(-1, 2):
            for dx in range(-1, 2):
                candidate = (center[0] + dz, center[1] + dy, center[2] + dx)
                # Keep only coordinates within the volume bounds.
                if all(0 <= coord < 100 for coord in candidate):
                    points.append(candidate)
    return set(points)
def generateTestVolume():
    """Build a 100^3 ground-truth volume of non-overlapping point clusters
    plus a noisy copy.

    Returns:
        (volume, noiseVolume): ``volume`` holds 60000 at cluster voxels and
        0 elsewhere; ``noiseVolume`` keeps the cluster voxels and fills the
        background with uniform noise in [0, 10000).
    """
    volume = np.zeros((100, 100, 100))
    myPointSet = set()
    for _ in range(rand(500, 800)):
        potentialPointSet = generatePointSet()
        # Regenerate until the candidate cluster overlaps nothing placed so far.
        while len(myPointSet.intersection(potentialPointSet)) > 0:
            potentialPointSet = generatePointSet()
        myPointSet.update(potentialPointSet)
    # Populate the true volume.
    for z, y, x in myPointSet:
        volume[z, y, x] = 60000
    # Background noise, vectorized: the original triple Python loop over
    # 10^6 voxels was needlessly slow. Cluster voxels keep their value;
    # everything else gets uniform noise in [0, 10000).
    noise = np.random.randint(0, 10000, size=volume.shape)
    noiseVolume = np.where(volume > 0, volume, noise)
    return volume, noiseVolume
# (A stray "-" line here in the original was a SyntaxError; removed.)
test, noiseTest = generateTestVolume()
# Ground-truth clusters.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
z, y, x = test.nonzero()
ax.scatter(x, y, z, zdir='z', c='r')
plt.show()
# Noisy volume (nearly every voxel is nonzero, so this plot is dense).
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
z, y, x = noiseTest.nonzero()
ax.scatter(x, y, z, zdir='z', c='r')
plt.show()
# Run the detection experiment 10 times and average probability of detection
# (PD) and false-alarm rate (FAR) for the pipeline vs. a random baseline.
# (Python 2 print statements throughout this section.)
spd=0.
sfar=0.
srpd=0.
srfar=0.
for num in range(1,11):
    print "\nExecuting Test: ", num
    # NOTE(review): executeTest is not defined in this chunk — presumably
    # defined earlier in the notebook; confirm before running.
    pd, far, rpd, rfar = executeTest()
    spd+=pd
    sfar+=far
    srpd+=rpd
    srfar+=rfar
print '\n\nAverage Performance:'
print '\tPipeline:'
print '\t\tPD: ', spd/10.
print '\t\tFAR: ', sfar/10.
print '\tRandom: '
print '\t\tPD: ', srpd/10.
print '\t\tFAR: ', srfar/10.
def generateSmallPointSet():
    """Return a singleton set holding one random (z, y, x) voxel coordinate."""
    return {(rand(0, 99), rand(0, 99), rand(0, 99))}
def generateSmallTestVolume():
    """Build a 100^3 volume of single-voxel points plus a noisy copy.

    Same contract as generateTestVolume, but each "cluster" is one voxel.
    """
    volume = np.zeros((100, 100, 100))
    myPointSet = set()
    for _ in range(rand(500, 800)):
        potentialPointSet = generateSmallPointSet()
        # Regenerate on collision. BUGFIX: the original fell back to
        # generatePointSet() here (a 27-voxel cube), contradicting the
        # single-voxel intent of this failure test.
        while len(myPointSet.intersection(potentialPointSet)) > 0:
            potentialPointSet = generateSmallPointSet()
        myPointSet.update(potentialPointSet)
    # Populate the true volume.
    for z, y, x in myPointSet:
        volume[z, y, x] = 60000
    # Vectorized background noise (the original looped over 10^6 voxels).
    noise = np.random.randint(0, 10000, size=volume.shape)
    noiseVolume = np.where(volume > 0, volume, noise)
    return volume, noiseVolume
def executeFailureTest():
    # Generate a single-voxel ground truth and its noisy counterpart, then
    # score the pipeline's detections against a coin-flip baseline.
    trueVolume, testVolume = generateSmallTestVolume()
    # Connected-component labels of the true points; maxLabel = point count.
    labelVolume = label(trueVolume)
    maxLabel = np.max(labelVolume)
    # Pipeline detections: puncta probabilities, Otsu-binarized.
    results = cLib.otsuVox(pLib.pipeline(testVolume))
    detected = zip(*np.where(results == 1))
    # Random baseline: every voxel flagged with probability ~1/2.
    randomResults = np.zeros_like(testVolume)
    for z in range(randomResults.shape[0]):
        for y in range(randomResults.shape[1]):
            for x in range(randomResults.shape[2]):
                randomResults[z][y][x] = rand(0, 2)
    randomDetected = zip(*np.where(randomResults == 1))
    # Score: a detection on a not-yet-counted label is a true positive; one
    # on background (label 0) is a false alarm.
    numDetected = 0
    numMistaken = 0
    alreadyCounted = []
    for point in detected:
        labelPointVal = labelVolume[point[0], point[1], point[2]]
        if labelPointVal != 0 and not labelPointVal in alreadyCounted:
            numDetected +=1
            alreadyCounted.append(labelPointVal)
        if labelPointVal == 0:
            numMistaken +=1
    print "\tPipeline:"
    print "\t\tPD: ", float(numDetected)/maxLabel
    print "\t\tFAR: ", float(numMistaken)/(100 * 100 *100)
    # Same scoring for the random baseline.
    randNumDetected = 0
    randNumMistaken = 0
    alreadyCounted = []
    for point in randomDetected:
        labelPointVal = labelVolume[point[0], point[1], point[2]]
        if labelPointVal != 0 and not labelPointVal in alreadyCounted:
            randNumDetected +=1
            alreadyCounted.append(labelPointVal)
        if labelPointVal == 0:
            randNumMistaken +=1
    print "\tRandom:"
    print "\t\tPD: ", float(randNumDetected)/maxLabel
    print "\t\tFAR: ", float(randNumMistaken)/(100 * 100 *100)
    # Return (pipeline PD, pipeline FAR, random PD, random FAR).
    return float(numDetected)/maxLabel, float(numMistaken)/(100 * 100 *100), float(randNumDetected)/maxLabel, float(randNumMistaken)/(100 * 100 *100)
# Average PD/FAR over 10 runs of the single-voxel failure test, for the
# pipeline and the random baseline. (Python 2 print statements.)
spd=0.
sfar=0.
srpd=0.
srfar=0.
for num in range(1,11):
    print "\nExecuting Test: ", num
    pd, far, rpd, rfar = executeFailureTest()
    spd+=pd
    sfar+=far
    srpd+=rpd
    srfar+=rfar
print '\n\nAverage Performance:'
print '\tPipeline:'
print '\t\tPD: ', spd/10.
print '\t\tFAR: ', sfar/10.
print '\tRandom: '
print '\t\tPD: ', srpd/10.
print '\t\tFAR: ', srfar/10.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Write Code
Step2: 2.Generate Functionality Results
Step3: 3. Analyze Functionality Results
Step4: 2.Generate Results
Step5: 3. Analyze Results
Step6: 2. Generate Results
|
7,870
|
<ASSISTANT_TASK:>
Python Code:
# TensorFlow and tf.keras
import matplotlib.pyplot as plt
# Helper libraries
import numpy as np
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
# Fashion-MNIST: 28x28 grayscale clothing images in 10 classes, pre-split
# into training and test sets.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (
    test_images,
    test_labels,
) = fashion_mnist.load_data()
# Human-readable names for the integer labels 0..9.
class_names = [
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]
# Quick shape/label sanity checks (notebook cell outputs).
train_images.shape
len(train_labels)
train_labels
test_images.shape
len(test_labels)
# Inspect one raw image; pixel values are in [0, 255].
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
# Scale pixels to [0, 1]; train and test must be preprocessed identically.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Show the first 25 training images with their class names.
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
# Model: flatten 28x28 -> 784, one 128-unit ReLU layer, 10 output logits.
model = keras.Sequential(
    [
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(128, activation="relu"),
        keras.layers.Dense(10),
    ]
)
# from_logits=True because the last layer emits raw scores, not probabilities.
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
model.fit(train_images, train_labels, epochs=10)
# Held-out accuracy; a gap vs. training accuracy indicates overfitting.
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("\nTest accuracy:", test_acc)
# Wrap the model with a softmax so outputs are class probabilities.
probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
predictions = probability_model.predict(test_images)
predictions[0]
# Most likely class for the first test image, and its true label.
np.argmax(predictions[0])
test_labels[0]
def plot_image(i, predictions_array, true_label, img):
    """Plot test image i with its predicted class, confidence and true class.

    The caption is blue when the prediction is correct, red otherwise.
    """
    true_label = true_label[i]
    img = img[i]

    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)

    predicted_label = np.argmax(predictions_array)
    color = "blue" if predicted_label == true_label else "red"
    caption = "{} {:2.0f}% ({})".format(
        class_names[predicted_label],
        100 * np.max(predictions_array),
        class_names[true_label],
    )
    plt.xlabel(caption, color=color)
def plot_value_array(i, predictions_array, true_label):
    """Bar-plot the 10 class probabilities for sample i.

    The predicted class bar is red; the true class bar is blue (which
    overrides red when the prediction is correct).
    """
    true_label = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    bars = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    bars[predicted_label].set_color("red")
    bars[true_label].set_color("blue")
# Visualize the first test sample's prediction.
i = 0
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1, 2, 2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# And sample 12.
i = 12
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1, 2, 2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols
plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions[i], test_labels, test_images)
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    plot_value_array(i, predictions[i], test_labels)
plt.show()
# Grab an image from the test dataset.
img = test_images[1]
print(img.shape)
# Add the image to a batch where it's the only member: keras models predict
# on batches, so shape becomes (1, 28, 28).
img = np.expand_dims(img, 0)
print(img.shape)
predictions_single = probability_model.predict(img)
print(predictions_single)
plot_value_array(1, predictions_single[0], test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
# Index of the most likely class for the single image.
np.argmax(predictions_single[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import the Fashion MNIST dataset
Step2: Loading the dataset returns four NumPy arrays
Step3: Explore the data
Step4: Likewise, there are 60,000 labels in the training set
Step5: Each label is an integer between 0 and 9
Step6: There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels
Step7: And the test set contains 10,000 images labels
Step8: Preprocess the data
Step9: Scale these values to a range of 0 to 1 before feeding them to the neural network model. To do so, divide the values by 255. It's important that the training set and the testing set be preprocessed in the same way
Step10: To verify that the data is in the correct format and that you're ready to build and train the network, let's display the first 25 images from the training set and display the class name below each image.
Step11: Build the model
Step12: The first layer in this network, tf.keras.layers.Flatten, transforms the format of the images from a two-dimensional array (of 28 by 28 pixels) to a one-dimensional array (of 28 * 28 = 784 pixels). Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.
Step13: Train the model
Step14: As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.91 (or 91%) on the training data.
Step15: It turns out that the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy represents overfitting. Overfitting happens when a machine learning model performs worse on new, previously unseen inputs than it does on the training data. An overfitted model "memorizes" the noise and details in the training dataset to a point where it negatively impacts the performance of the model on the new data. For more information, see the following
Step16: Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction
Step17: A prediction is an array of 10 numbers. They represent the model's "confidence" that the image corresponds to each of the 10 different articles of clothing. You can see which label has the highest confidence value
Step18: So, the model is most confident that this image is an ankle boot, or class_names[9]. Examining the test label shows that this classification is correct
Step19: Graph this to look at the full set of 10 class predictions.
Step20: Verify predictions
Step21: Let's plot several images with their predictions. Note that the model can be wrong even when very confident.
Step22: Use the trained model
Step23: tf.keras models are optimized to make predictions on a batch, or collection, of examples at once. Accordingly, even though you're using a single image, you need to add it to a list
Step24: Now predict the correct label for this image
Step25: keras.Model.predict returns a list of lists—one list for each image in the batch of data. Grab the predictions for our (only) image in the batch
|
7,871
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
import matplotlib.pyplot as plt
import quantities as pq
import neo
import elephant.unitary_event_analysis as ue
# Fix random seed to guarantee fixed output
random.seed(1224)
# Download data (IPython shell magic; only valid inside a notebook)
!wget -Nq https://github.com/INM-6/elephant-tutorial-data/raw/master/dataset-1/dataset-1.h5
def plot_UE(data,Js_dict,Js_sig,binsize,winsize,winstep, pat,N,t_winpos,**kwargs):
    """Visualize the results of a unitary-event analysis.

    Five stacked panels sharing one time axis: unitary events, raw
    coincidences, PSTH, empirical vs. expected coincidence counts, and the
    surprise measure Js against the significance threshold Js_sig.

    (The original docstring below had lost its triple quotes, which made the
    whole function a SyntaxError; restored here.)

    Examples:
    ---------
    dict_args = {'events':{'SO':[100*pq.ms]},
                 'save_fig': True,
                 'path_filename_format':'UE1.pdf',
                 'showfig':True,
                 'suptitle':True,
                 'figsize':(12,10),
                 'unit_ids':[10, 19, 20],
                 'ch_ids':[1,3,4],
                 'fontsize':15,
                 'linewidth':2,
                 'set_xticks':False,
                 'marker_size':8}
    """
    import matplotlib.pylab as plt
    t_start = data[0][0].t_start
    t_stop = data[0][0].t_stop
    # Default plot options, overridable via **kwargs.
    arg_dict = {'events':{},'figsize':(12,10), 'top':0.9, 'bottom':0.05, 'right':0.95,'left':0.1,
                'hspace':0.5,'wspace':0.5,'fontsize':15,'unit_ids':range(1,N+1,1),
                'ch_real_ids':[],'showfig':False, 'lw':2,'S_ylim':[-3,3],
                'marker_size':8, 'suptitle':False, 'set_xticks':False,
                'save_fig':False,'path_filename_format':'UE.pdf'}
    arg_dict.update(kwargs)
    num_tr = len(data)
    unit_real_ids = arg_dict['unit_ids']
    num_row = 5
    num_col = 1
    ls = '-'
    alpha = 0.5
    plt.figure(1,figsize = arg_dict['figsize'])
    if arg_dict['suptitle'] == True:
        plt.suptitle("Spike Pattern:"+ str((pat.T)[0]),fontsize = 20)
    print('plotting UEs ...')
    plt.subplots_adjust(top=arg_dict['top'], right=arg_dict['right'], left=arg_dict['left']
                        , bottom=arg_dict['bottom'], hspace=arg_dict['hspace'] , wspace=arg_dict['wspace'])
    # Panel 1: spike rasters with significant (unitary) events marked in red.
    ax = plt.subplot(num_row,1,1)
    ax.set_title('Unitary Events',fontsize = arg_dict['fontsize'],color = 'r')
    for n in range(N):
        for tr,data_tr in enumerate(data):
            plt.plot(data_tr[n].rescale('ms').magnitude, np.ones_like(data_tr[n].magnitude)*tr + n*(num_tr + 1) + 1, '.', markersize=0.5,color = 'k')
            sig_idx_win = np.where(Js_dict['Js']>= Js_sig)[0]
            if len(sig_idx_win)>0:
                x = np.unique(Js_dict['indices']['trial'+str(tr)])
                if len(x)>0:
                    xx = []
                    for j in sig_idx_win:
                        xx =np.append(xx,x[np.where((x*binsize>=t_winpos[j]) &(x*binsize<t_winpos[j] + winsize))])
                    plt.plot(
                        np.unique(xx)*binsize, np.ones_like(np.unique(xx))*tr + n*(num_tr + 1) + 1,
                        ms=arg_dict['marker_size'], marker = 's', ls = '',mfc='none', mec='r')
        # Horizontal separator between units.
        plt.axhline((tr + 2)*(n+1) ,lw = 2, color = 'k')
    y_ticks_pos = np.arange(num_tr/2 + 1,N*(num_tr+1), num_tr+1)
    plt.yticks(y_ticks_pos)
    plt.gca().set_yticklabels(unit_real_ids,fontsize = arg_dict['fontsize'])
    for ch_cnt, ch_id in enumerate(arg_dict['ch_real_ids']):
        print(ch_id)
        plt.gca().text((max(t_winpos) + winsize).rescale('ms').magnitude,
                       y_ticks_pos[ch_cnt],'CH-'+str(ch_id),fontsize = arg_dict['fontsize'])
    plt.ylim(0, (tr + 2)*(n+1) + 1)
    plt.xlim(0, (max(t_winpos) + winsize).rescale('ms').magnitude)
    plt.xticks([])
    plt.ylabel('Unit ID',fontsize = arg_dict['fontsize'])
    for key in arg_dict['events'].keys():
        for e_val in arg_dict['events'][key]:
            plt.axvline(e_val,ls = ls,color = 'r',lw = 2,alpha = alpha)
    if arg_dict['set_xticks'] == False:
        plt.xticks([])
    # Panel 2: rasters with every raw coincidence marked in cyan.
    print('plotting Raw Coincidences ...')
    ax1 = plt.subplot(num_row,1,2,sharex = ax)
    ax1.set_title('Raw Coincidences',fontsize = 20,color = 'c')
    for n in range(N):
        for tr,data_tr in enumerate(data):
            plt.plot(data_tr[n].rescale('ms').magnitude,
                     np.ones_like(data_tr[n].magnitude)*tr + n*(num_tr + 1) + 1,
                     '.', markersize=0.5, color = 'k')
            plt.plot(
                np.unique(Js_dict['indices']['trial'+str(tr)])*binsize,
                np.ones_like(np.unique(Js_dict['indices']['trial'+str(tr)]))*tr + n*(num_tr + 1) + 1,
                ls = '',ms=arg_dict['marker_size'], marker = 's', markerfacecolor='none', markeredgecolor='c')
        plt.axhline((tr + 2)*(n+1) ,lw = 2, color = 'k')
    plt.ylim(0, (tr + 2)*(n+1) + 1)
    plt.yticks(np.arange(num_tr/2 + 1,N*(num_tr+1), num_tr+1))
    plt.gca().set_yticklabels(unit_real_ids,fontsize = arg_dict['fontsize'])
    plt.xlim(0, (max(t_winpos) + winsize).rescale('ms').magnitude)
    plt.xticks([])
    plt.ylabel('Unit ID',fontsize = arg_dict['fontsize'])
    for key in arg_dict['events'].keys():
        for e_val in arg_dict['events'][key]:
            plt.axvline(e_val,ls = ls,color = 'r',lw = 2,alpha = alpha)
    # Panel 3: window-averaged firing rates per unit (PSTH).
    print('plotting PSTH ...')
    plt.subplot(num_row,1,3,sharex=ax)
    #max_val_psth = 0.*pq.Hz
    for n in range(N):
        plt.plot(t_winpos + winsize/2.,Js_dict['rate_avg'][:,n].rescale('Hz'),
                 label = 'unit '+str(arg_dict['unit_ids'][n]),lw = arg_dict['lw'])
    plt.ylabel('Rate [Hz]',fontsize = arg_dict['fontsize'])
    plt.xlim(0, (max(t_winpos) + winsize).rescale('ms').magnitude)
    max_val_psth = plt.gca().get_ylim()[1]
    plt.ylim(0, max_val_psth)
    plt.yticks([0, int(max_val_psth/2),int(max_val_psth)],fontsize = arg_dict['fontsize'])
    plt.legend(bbox_to_anchor=(1.12, 1.05), fancybox=True, shadow=True)
    for key in arg_dict['events'].keys():
        for e_val in arg_dict['events'][key]:
            plt.axvline(e_val,ls = ls,color = 'r',lw = arg_dict['lw'],alpha = alpha)
    if arg_dict['set_xticks'] == False:
        plt.xticks([])
    # Panel 4: empirical vs. expected coincidence counts per window.
    print( 'plotting emp. and exp. coincidences rate ...')
    plt.subplot(num_row,1,4,sharex=ax)
    plt.plot(t_winpos + winsize/2.,Js_dict['n_emp'],label = 'empirical',lw = arg_dict['lw'],color = 'c')
    plt.plot(t_winpos + winsize/2.,Js_dict['n_exp'],label = 'expected',lw = arg_dict['lw'],color = 'm')
    plt.xlim(0, (max(t_winpos) + winsize).rescale('ms').magnitude)
    plt.ylabel('# Coinc.',fontsize = arg_dict['fontsize'])
    plt.legend(bbox_to_anchor=(1.12, 1.05), fancybox=True, shadow=True)
    YTicks = plt.ylim(0,int(max(max(Js_dict['n_emp']), max(Js_dict['n_exp']))))
    plt.yticks([0,YTicks[1]],fontsize = arg_dict['fontsize'])
    for key in arg_dict['events'].keys():
        for e_val in arg_dict['events'][key]:
            plt.axvline(e_val,ls = ls,color = 'r',lw = 2,alpha = alpha)
    if arg_dict['set_xticks'] == False:
        plt.xticks([])
    # Panel 5: surprise measure with significance threshold lines.
    print('plotting Surprise ...')
    plt.subplot(num_row,1,5,sharex=ax)
    plt.plot(t_winpos + winsize/2., Js_dict['Js'],lw = arg_dict['lw'],color = 'k')
    plt.xlim(0, (max(t_winpos) + winsize).rescale('ms').magnitude)
    plt.axhline(Js_sig,ls = '-', color = 'gray')
    plt.axhline(-Js_sig,ls = '-', color = 'gray')
    plt.xticks(t_winpos.magnitude[::int(len(t_winpos)/10)])
    plt.yticks([-2,0,2],fontsize = arg_dict['fontsize'])
    plt.ylabel('S',fontsize = arg_dict['fontsize'])
    plt.xlabel('Time [ms]', fontsize = arg_dict['fontsize'])
    plt.ylim(arg_dict['S_ylim'])
    for key in arg_dict['events'].keys():
        for e_val in arg_dict['events'][key]:
            plt.axvline(e_val,ls = ls,color = 'r',lw = arg_dict['lw'],alpha = alpha)
            plt.gca().text(e_val - 10*pq.ms,2*arg_dict['S_ylim'][0],key,fontsize = arg_dict['fontsize'],color = 'r')
    if arg_dict['set_xticks'] == False:
        plt.xticks([])
    if arg_dict['save_fig'] == True:
        plt.savefig(arg_dict['path_filename_format'])
        if arg_dict['showfig'] == False:
            plt.cla()
            plt.close()
    if arg_dict['showfig'] == True:
        plt.show()
# Load both segments' spike trains and pair them trial-wise: row i holds the
# spike trains of the two units for trial i.
block = neo.io.NeoHdf5IO("./dataset-1.h5")
sts1 = block.read_block().segments[0].spiketrains
sts2 = block.read_block().segments[1].spiketrains
spiketrains = np.vstack((sts1,sts2)).T
# Sliding-window UE analysis: 5 ms bins, 100 ms windows, 10 ms steps.
# pattern_hash=[3] — per inverse_hash_from_pattern below, hash 3 with N=2
# presumably encodes the "both neurons spike" pattern.
UE = ue.jointJ_window_analysis(
    spiketrains, binsize=5*pq.ms, winsize=100*pq.ms, winstep=10*pq.ms, pattern_hash=[3])
# Plot with the surprise threshold corresponding to p = 0.05.
plot_UE(
    spiketrains, UE, ue.jointJ(0.05),binsize=5*pq.ms,winsize=100*pq.ms,winstep=10*pq.ms,
    pat=ue.inverse_hash_from_pattern([3], N=2), N=2,
    t_winpos=ue._winpos(0*pq.ms,spiketrains[0][0].t_stop,winsize=100*pq.ms,winstep=10*pq.ms))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we download a data file containing spike train data from multiple trials of two neurons.
Step3: Write a plotting function
Step4: Load data and extract spiketrains
Step5: Calculate Unitary Events
|
7,872
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from astropy.table import Table, join
from astropy import units as u
from astropy.coordinates import SkyCoord, search_around_sky
import pickle
from tqdm import tnrange, tqdm_notebook
from IPython.display import clear_output
from mltier1 import parallel_process, SingleMLEstimator
%load_ext autoreload
%autoreload
%pylab inline
# Load both catalogues and build sky-coordinate objects for cross-matching.
panstarrs = Table.read("panstarrs_u2.fits")
wise = Table.read("wise_u2.fits")
len(panstarrs)
len(wise)
coords_panstarrs = SkyCoord(panstarrs['raMean'], panstarrs['decMean'], unit=(u.deg, u.deg), frame='icrs')
coords_wise = SkyCoord(wise['raWise'], wise['decWise'], unit=(u.deg, u.deg), frame='icrs')
# Pre-computed ML parameters: magnitude bins, bin centres, Q0, n(m), q(m).
bin_list, center, q0, n_m, q_m = pickle.load(open("pw_params.pckl", "rb"))
likelihood_ratio = SingleMLEstimator(q0, n_m, q_m, center)
def get_sigma(ra1, dec1, ra1err, dec1err, ra2, dec2, ra2err, dec2err):
    """Combine the positional errors of two sources along their join.

    Input positions in degrees; errors in arcsec; output in arcsec.
    (The original docstring had lost its triple quotes, which made this
    function a SyntaxError; restored here.)
    """
    # RA separations shrink with cos(dec); evaluate at the mean declination.
    cosadj = np.cos(np.deg2rad(0.5*(dec1 + dec2)))
    phi = np.arctan2((dec2 - dec1), ((ra2 - ra1)*cosadj))
    sigma = np.pi - phi
    # Project each source's error ellipse onto the join direction.
    err1squared = (ra1err * np.cos(sigma))**2 + (dec1err * np.sin(sigma))**2
    err2squared = (ra2err * np.cos(phi))**2 + (dec2err * np.sin(phi))**2
    return np.sqrt(err1squared + err2squared)
# Cross-match: every PanSTARRS source within 15" of each WISE source.
radius = 15
idx_wise, idx_panstarrs, d2d, d3d = search_around_sky(
    coords_wise, coords_panstarrs, radius*u.arcsec)
idx_wise_unique = np.unique(idx_wise)
# Output columns for the maximum-likelihood match.
wise["lr"] = np.nan # Likelihood ratio
wise["lr_dist"] = np.nan # Distance to the selected source
wise["lr_panstarrs_index"] = np.nan # Index of the PanSTARRS source
total_sources = len(idx_wise_unique)
total_sources
# Positional index into the panstarrs table, used by ml() below.
panstarrs_aux_index = np.arange(len(panstarrs))
def ml(i):
idx_0 = idx_panstarrs[idx_wise == i]
d2d_0 = d2d[idx_wise == i]
i_mag = panstarrs["i"][idx_0]
# sigma = get_sigma(wise["ra"][i],
# wise["dec"][i],
# wise["sigra"][i],
# wise["sigdec"][i],
# panstarrs[idx_0]["raMean"][idx_0],
# panstarrs[idx_0]["decMean"][idx_0],
# panstarrs[idx_0]["raMeanErr"][idx_0],
# panstarrs[idx_0]["decMeanErr"][idx_0],
# )
sigma = 1.
lr_0 = likelihood_ratio(i_mag, d2d_0.arcsec, sigma)
chosen_index = np.argmax(lr_0)
result = [panstarrs_aux_index[idx_0[chosen_index]], # Index
(d2d_0.arcsec)[chosen_index], # distance
lr_0[chosen_index]] # LR
return result
step_size = 100000
nsteps = total_sources//step_size + 1
res = []
for k in tnrange(nsteps, desc="Blocks"):
low_limit = k*step_size
high_limit = (k+1)*step_size
res += parallel_process(idx_wise_unique[low_limit:high_limit],
ml,
n_jobs=8,
notebook=True)
len(res)
(wise["lr_panstarrs_index"][idx_wise_unique],
wise["lr_dist"][idx_wise_unique],
wise["lr"][idx_wise_unique]) = list(map(list, zip(*res)))
wise["lr_pc"] = wise["lr"]
wise["lr_pc"][np.isnan(wise["lr_pc"])] = 0
threshold = np.percentile(wise["lr_pc"], 100*(1 - q0))
threshold
plt.rcParams["figure.figsize"] = (15,6)
subplot(1,2,1)
hist(wise[~np.isnan(wise["lr_pc"])]["lr_pc"], bins=200)
vlines([threshold], 0, 150000)
ylim([0,150000])
subplot(1,2,2)
hist(np.log10(wise[~np.isnan(wise["lr_pc"])]["lr_pc"]+1), bins=200)
vlines(np.log10(threshold+1), 0, 150000)
ylim([0,150000]);
wise["lr_index"] = wise["lr_panstarrs_index"]
wise["lr_index"][wise["lr_pc"] < threshold] = np.nan
panstarrs["lr_index"] = np.arange(len(panstarrs)).astype(float)
combined = join(wise, panstarrs, join_type='outer', keys='lr_index')
combined['ra'] = combined['raMean']
combined['dec'] = combined['decMean']
combined['raErr'] = combined['raMeanErr']
combined['decErr'] = combined['decMeanErr']
combined['ra'][np.isnan(combined['raMean'])] = combined['raWise'][np.isnan(combined['raMean'])]
combined['dec'][np.isnan(combined['decMean'])] = combined['decWise'][np.isnan(combined['decMean'])]
combined['raErr'][np.isnan(combined['raMean'])] = combined['raWiseErr'][np.isnan(combined['raMean'])]
combined['decErr'][np.isnan(combined['decMean'])] = combined['decWiseErr'][np.isnan(combined['decMean'])]
for col in ["raMean", "decMean", "raMeanErr", "decMeanErr",
"raWise", "decWise", "raWiseErr", "decWiseErr",
"ra", "dec", "raErr", "decErr"]:
combined[col].fill_value = 1e+20
columns_save = ['AllWISE', 'objID', 'ra', 'dec', 'raErr', 'decErr',
'W1mag', 'W1magErr', 'i', 'iErr']
combined[columns_save].write('pw.fits', format="fits")
np.array(combined.colnames)
combined.write('pw_lrdata.fits', format="fits")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Coordinates
Step3: Import the ML parameeters
Step4: Define the main functions
Step6: The following function could be used to get the sigma using the errors in the two optical catalogues but it may underestimate the error (not used)
Step7: Compute the ML ratio
Step8: Plot the ML results
Step9: Combine data
Step10: We combine the two catalogues using an outer join wich maintains all the data for the two catalogues
Step11: Save catalogue
Step12: Important step to solve problems with the default values for the columns. Check later why we have to do that.
Step13: We can also save a version with all the data
|
7,873
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function, unicode_literals
import pandas as pd
s = pd.Series([2,-1,3,5])
s
import numpy as np
np.exp(s)
s + [1000,2000,3000,4000]
s + 1000
s < 0
s2 = pd.Series([68, 83, 112, 68], index=["alice", "bob", "charles", "darwin"])
s2
s2["bob"]
s2[1]
s2.loc["bob"]
s2.iloc[1]
s2.iloc[1:3]
surprise = pd.Series([1000, 1001, 1002, 1003])
surprise
surprise_slice = surprise[2:]
surprise_slice
try:
surprise_slice[0]
except KeyError as e:
print("Key error:", e)
surprise_slice.iloc[0]
weights = {"alice": 68, "bob": 83, "colin": 86, "darwin": 68}
s3 = pd.Series(weights)
s3
s4 = pd.Series(weights, index = ["colin", "alice"])
s4
print(s2.keys())
print(s3.keys())
s2 + s3
s5 = pd.Series([1000,1000,1000,1000])
print("s2 =", s2.values)
print("s5 =", s5.values)
s2 + s5
meaning = pd.Series(42, ["life", "universe", "everything"])
meaning
s6 = pd.Series([83, 68], index=["bob", "alice"], name="weights")
s6
%matplotlib inline
import matplotlib.pyplot as plt
temperatures = [4.4,5.1,6.1,6.2,6.1,6.1,5.7,5.2,4.7,4.1,3.9,3.5]
s7 = pd.Series(temperatures, name="Temperature")
s7.plot()
plt.show()
dates = pd.date_range('2016/10/29 5:30pm', periods=12, freq='H')
dates
temp_series = pd.Series(temperatures, dates)
temp_series
temp_series.plot(kind="bar")
plt.grid(True)
plt.show()
temp_series_freq_2H = temp_series.resample("2H")
temp_series_freq_2H
temp_series_freq_2H.plot(kind="bar")
plt.show()
temp_series_freq_2H = temp_series.resample("2H", how=np.min)
temp_series_freq_2H
temp_series_freq_15min = temp_series.resample("15Min")
temp_series_freq_15min.head(n=10) # `head` displays the top n values
temp_series_freq_15min = temp_series.resample("15Min").interpolate(method="cubic")
temp_series_freq_15min.head(n=10)
temp_series.plot(label="Period: 1 hour")
temp_series_freq_15min.plot(label="Period: 15 minutes")
plt.legend()
plt.show()
temp_series_ny = temp_series.tz_localize("America/New_York")
temp_series_ny
temp_series_paris = temp_series_ny.tz_convert("Europe/Paris")
temp_series_paris
temp_series_paris_naive = temp_series_paris.tz_localize(None)
temp_series_paris_naive
try:
temp_series_paris_naive.tz_localize("Europe/Paris")
except Exception as e:
print(type(e))
print(e)
temp_series_paris_naive.tz_localize("Europe/Paris", ambiguous="infer")
quarters = pd.period_range('2016Q1', periods=8, freq='Q')
quarters
quarters + 3
quarters.asfreq("M")
quarters.asfreq("M", how="start")
quarters.asfreq("A")
quarterly_revenue = pd.Series([300, 320, 290, 390, 320, 360, 310, 410], index = quarters)
quarterly_revenue
quarterly_revenue.plot(kind="line")
plt.show()
last_hours = quarterly_revenue.to_timestamp(how="end", freq="H")
last_hours
last_hours.to_period()
months_2016 = pd.period_range("2016", periods=12, freq="M")
one_day_after_last_days = months_2016.asfreq("D") + 1
last_bdays = one_day_after_last_days.to_timestamp() - pd.tseries.offsets.BDay()
last_bdays.to_period("H") + 9
people_dict = {
"weight": pd.Series([68, 83, 112], index=["alice", "bob", "charles"]),
"birthyear": pd.Series([1984, 1985, 1992], index=["bob", "alice", "charles"], name="year"),
"children": pd.Series([0, 3], index=["charles", "bob"]),
"hobby": pd.Series(["Biking", "Dancing"], index=["alice", "bob"]),
}
people = pd.DataFrame(people_dict)
people
people["birthyear"]
people[["birthyear", "hobby"]]
d2 = pd.DataFrame(
people_dict,
columns=["birthyear", "weight", "height"],
index=["bob", "alice", "eugene"]
)
d2
values = [
[1985, np.nan, "Biking", 68],
[1984, 3, "Dancing", 83],
[1992, 0, np.nan, 112]
]
d3 = pd.DataFrame(
values,
columns=["birthyear", "children", "hobby", "weight"],
index=["alice", "bob", "charles"]
)
d3
masked_array = np.ma.asarray(values, dtype=np.object)
masked_array[(0, 2), (1, 2)] = np.ma.masked
d3 = pd.DataFrame(
values,
columns=["birthyear", "children", "hobby", "weight"],
index=["alice", "bob", "charles"]
)
d3
d4 = pd.DataFrame(
d3,
columns=["hobby", "children"],
index=["alice", "bob"]
)
d4
people = pd.DataFrame({
"birthyear": {"alice":1985, "bob": 1984, "charles": 1992},
"hobby": {"alice":"Biking", "bob": "Dancing"},
"weight": {"alice":68, "bob": 83, "charles": 112},
"children": {"bob": 3, "charles": 0}
})
people
d5 = pd.DataFrame(
{
("public", "birthyear"):
{("Paris","alice"):1985, ("Paris","bob"): 1984, ("London","charles"): 1992},
("public", "hobby"):
{("Paris","alice"):"Biking", ("Paris","bob"): "Dancing"},
("private", "weight"):
{("Paris","alice"):68, ("Paris","bob"): 83, ("London","charles"): 112},
("private", "children"):
{("Paris", "alice"):np.nan, ("Paris","bob"): 3, ("London","charles"): 0}
}
)
d5
d5["public"]
d5["public", "hobby"] # Same result as d4["public"]["hobby"]
d5
d5.columns = d5.columns.droplevel(level = 0)
d5
d6 = d5.T
d6
d7 = d6.stack()
d7
d8 = d7.unstack()
d8
d9 = d8.unstack()
d9
d10 = d9.unstack(level = (0,1))
d10
people
people.loc["charles"]
people.iloc[2]
people.iloc[1:3]
people[np.array([True, False, True])]
people[people["birthyear"] < 1990]
people
people["age"] = 2016 - people["birthyear"] # adds a new column "age"
people["over 30"] = people["age"] > 30 # adds another column "over 30"
birthyears = people.pop("birthyear")
del people["children"]
people
birthyears
people["pets"] = pd.Series({"bob": 0, "charles": 5, "eugene":1}) # alice is missing, eugene is ignored
people
people.insert(1, "height", [172, 181, 185])
people
people.assign(
body_mass_index = people["weight"] / (people["height"] / 100) ** 2,
has_pets = people["pets"] > 0
)
try:
people.assign(
body_mass_index = people["weight"] / (people["height"] / 100) ** 2,
overweight = people["body_mass_index"] > 25
)
except KeyError as e:
print("Key error:", e)
d6 = people.assign(body_mass_index = people["weight"] / (people["height"] / 100) ** 2)
d6.assign(overweight = d6["body_mass_index"] > 25)
try:
(people
.assign(body_mass_index = people["weight"] / (people["height"] / 100) ** 2)
.assign(overweight = people["body_mass_index"] > 25)
)
except KeyError as e:
print("Key error:", e)
(people
.assign(body_mass_index = lambda df: df["weight"] / (df["height"] / 100) ** 2)
.assign(overweight = lambda df: df["body_mass_index"] > 25)
)
people.eval("weight / (height/100) ** 2 > 25")
people.eval("body_mass_index = weight / (height/100) ** 2")
people
overweight_threshold = 30
people.eval("overweight = body_mass_index > @overweight_threshold")
people
people.query("age > 30 and pets == 0")
people.sort_index(ascending=False)
people.sort_index(axis=1, inplace=True)
people
people.sort_values(by="age", inplace=True)
people
people.plot(kind = "line", x = "body_mass_index", y = ["height", "weight"])
plt.show()
people.plot(kind = "scatter", x = "height", y = "weight", s=[40, 120, 200])
plt.show()
grades_array = np.array([[8,8,9],[10,9,9],[4, 8, 2], [9, 10, 10]])
grades = pd.DataFrame(grades_array, columns=["sep", "oct", "nov"], index=["alice","bob","charles","darwin"])
grades
np.sqrt(grades)
grades + 1
grades >= 5
grades.mean()
(grades > 5).all()
(grades > 5).all(axis = 1)
(grades == 10).any(axis = 1)
grades - grades.mean() # equivalent to: grades - [7.75, 8.75, 7.50]
pd.DataFrame([[7.75, 8.75, 7.50]]*4, index=grades.index, columns=grades.columns)
grades - grades.values.mean() # substracts the global mean (8.00) from all grades
bonus_array = np.array([[0,np.nan,2],[np.nan,1,0],[0, 1, 0], [3, 3, 0]])
bonus_points = pd.DataFrame(bonus_array, columns=["oct", "nov", "dec"], index=["bob","colin", "darwin", "charles"])
bonus_points
grades + bonus_points
(grades + bonus_points).fillna(0)
fixed_bonus_points = bonus_points.fillna(0)
fixed_bonus_points.insert(0, "sep", 0)
fixed_bonus_points.loc["alice"] = 0
grades + fixed_bonus_points
bonus_points
bonus_points.interpolate(axis=1)
better_bonus_points = bonus_points.copy()
better_bonus_points.insert(0, "sep", 0)
better_bonus_points.loc["alice"] = 0
better_bonus_points = better_bonus_points.interpolate(axis=1)
better_bonus_points
grades + better_bonus_points
grades["dec"] = np.nan
final_grades = grades + better_bonus_points
final_grades
final_grades_clean = final_grades.dropna(how="all")
final_grades_clean
final_grades_clean = final_grades_clean.dropna(axis=1, how="all")
final_grades_clean
final_grades["hobby"] = ["Biking", "Dancing", np.nan, "Dancing", "Biking"]
final_grades
grouped_grades = final_grades.groupby("hobby")
grouped_grades
grouped_grades.mean()
bonus_points
more_grades = final_grades_clean.stack().reset_index()
more_grades.columns = ["name", "month", "grade"]
more_grades["bonus"] = [np.nan, np.nan, np.nan, 0, np.nan, 2, 3, 3, 0, 0, 1, 0]
more_grades
pd.pivot_table(more_grades, index="name")
pd.pivot_table(more_grades, index="name", values=["grade","bonus"], aggfunc=np.max)
pd.pivot_table(more_grades, index="name", values="grade", columns="month", margins=True)
pd.pivot_table(more_grades, index=("name", "month"), margins=True)
much_data = np.fromfunction(lambda x,y: (x+y*y)%17*11, (10000, 26))
large_df = pd.DataFrame(much_data, columns=list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
large_df[large_df % 16 == 0] = np.nan
large_df.insert(3,"some_text", "Blabla")
large_df
large_df.head()
large_df.tail(n=2)
large_df.info()
large_df.describe()
my_df = pd.DataFrame(
[["Biking", 68.5, 1985, np.nan], ["Dancing", 83.1, 1984, 3]],
columns=["hobby","weight","birthyear","children"],
index=["alice", "bob"]
)
my_df
my_df.to_csv("my_df.csv")
my_df.to_html("my_df.html")
my_df.to_json("my_df.json")
for filename in ("my_df.csv", "my_df.html", "my_df.json"):
print("#", filename)
with open(filename, "rt") as f:
print(f.read())
print()
try:
my_df.to_excel("my_df.xlsx", sheet_name='People')
except ImportError as e:
print(e)
my_df_loaded = pd.read_csv("my_df.csv", index_col=0)
my_df_loaded
us_cities = None
try:
csv_url = "http://simplemaps.com/files/cities.csv"
us_cities = pd.read_csv(csv_url, index_col=0)
us_cities = us_cities.head()
except IOError as e:
print(e)
us_cities
city_loc = pd.DataFrame(
[
["CA", "San Francisco", 37.781334, -122.416728],
["NY", "New York", 40.705649, -74.008344],
["FL", "Miami", 25.791100, -80.320733],
["OH", "Cleveland", 41.473508, -81.739791],
["UT", "Salt Lake City", 40.755851, -111.896657]
], columns=["state", "city", "lat", "lng"])
city_loc
city_pop = pd.DataFrame(
[
[808976, "San Francisco", "California"],
[8363710, "New York", "New-York"],
[413201, "Miami", "Florida"],
[2242193, "Houston", "Texas"]
], index=[3,4,5,6], columns=["population", "city", "state"])
city_pop
pd.merge(left=city_loc, right=city_pop, on="city")
all_cities = pd.merge(left=city_loc, right=city_pop, on="city", how="outer")
all_cities
pd.merge(left=city_loc, right=city_pop, on="city", how="right")
city_pop2 = city_pop.copy()
city_pop2.columns = ["population", "name", "state"]
pd.merge(left=city_loc, right=city_pop2, left_on="city", right_on="name")
result_concat = pd.concat([city_loc, city_pop])
result_concat
result_concat.loc[3]
pd.concat([city_loc, city_pop], ignore_index=True)
pd.concat([city_loc, city_pop], join="inner")
pd.concat([city_loc, city_pop], axis=1)
pd.concat([city_loc.set_index("city"), city_pop.set_index("city")], axis=1)
city_loc.append(city_pop)
city_eco = city_pop.copy()
city_eco["eco_code"] = [17, 17, 34, 20]
city_eco
city_eco["economy"] = city_eco["eco_code"].astype('category')
city_eco["economy"].cat.categories
city_eco["economy"].cat.categories = ["Finance", "Energy", "Tourism"]
city_eco
city_eco.sort_values(by="economy", ascending=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's import pandas. People usually import it as pd
Step2: Series objects
Step3: Similar to a 1D ndarray
Step4: Arithmetic operations on Series are also possible, and they apply elementwise, just like for ndarrays
Step5: Similar to NumPy, if you add a single number to a Series, that number is added to all items in the Series. This is called * broadcasting*
Step6: The same is true for all binary operations such as * or /, and even conditional operations
Step7: Index labels
Step8: You can then use the Series just like a dict
Step9: You can still access the items by integer location, like in a regular array
Step10: To make it clear when you are accessing by label or by integer location, it is recommended to always use the loc attribute when accessing by label, and the iloc attribute when accessing by integer location
Step11: Slicing a Series also slices the index labels
Step12: This can lead to unexpected results when using the default numeric labels, so be careful
Step13: Oh look! The first element has index label 2. The element with index label 0 is absent from the slice
Step14: But remember that you can access elements by integer location using the iloc attribute. This illustrates another reason why it's always better to use loc and iloc to access Series objects
Step15: Init from dict
Step16: You can control which elements you want to include in the Series and in what order by explicitly specifying the desired index
Step17: Automatic alignment
Step18: The resulting Series contains the union of index labels from s2 and s3. Since "colin" is missing from s2 and "charles" is missing from s3, these items have a NaN result value. (ie. Not-a-Number means missing).
Step19: Pandas could not align the Series, since their labels do not match at all, hence the full NaN result.
Step20: Series name
Step21: Plotting a Series
Step22: There are many options for plotting your data. It is not necessary to list them all here
Step23: This DatetimeIndex may be used as an index in a Series
Step24: Let's plot this series
Step25: Resampling
Step26: Let's take a look at the result
Step27: Note how the values have automatically been aggregated into 2-hour periods. If we look at the 6-8pm period, for example, we had a value of 5.1 at 6
Step28: Upsampling and interpolation
Step29: One solution is to fill the gaps by interpolating. We just call the interpolate method. The default is to use linear interpolation, but we can also select another method, such as cubic interpolation
Step30: Timezones
Step31: Note that -04
Step32: You may have noticed that the UTC offset changes from +02
Step33: Now 02
Step34: Fortunately using the ambiguous argument we can tell pandas to infer the right DST (Daylight Saving Time) based on the order of the ambiguous timestamps
Step35: Periods
Step36: Adding a number N to a PeriodIndex shifts the periods by N times the PeriodIndex's frequency
Step37: The asfreq method lets us change the frequency of the PeriodIndex. All periods are lengthened or shortened accordingly. For example, let's convert all the quarterly periods to monthly periods (zooming in)
Step38: By default, the asfreq zooms on the end of each period. We can tell it to zoom on the start of each period instead
Step39: And we can zoom out
Step40: Of course we can create a Series with a PeriodIndex
Step41: We can convert periods to timestamps by calling to_timestamp. By default this will give us the first day of each period, but by setting how and freq, we can get the last hour of each period
Step42: And back to periods by calling to_period
Step43: Pandas also provides many other time-related functions that we recommend you check out in the documentation. To whet your appetite, here is one way to get the last business day of each month in 2016, at 9am
Step44: DataFrame objects
Step45: A few things to note
Step46: You can also get multiple columns at once
Step47: If you pass a list of columns and/or index row labels to the DataFrame constructor, it will guarantee that these columns and/or rows will exist, in that order, and no other column/row will exist. For example
Step48: Another convenient way to create a DataFrame is to pass all the values to the constructor as an ndarray, or a list of lists, and specify the column names and row index labels separately
Step49: To specify missing values, you can either use np.nan or NumPy's masked arrays
Step50: Instead of an ndarray, you can also pass a DataFrame object
Step51: It is also possible to create a DataFrame with a dictionary (or list) of dictionaries (or list)
Step52: Multi-indexing
Step53: You can now get a DataFrame containing all the "public" columns very simply
Step54: Dropping a level
Step55: There are two levels of columns, and two levels of indices. We can drop a column level by calling droplevel (the same goes for indices)
Step56: Transposing
Step57: Stacking and unstacking levels
Step58: Note that many NaN values appeared. This makes sense because many new combinations did not exist before (eg. there was no bob in London).
Step59: If we call unstack again, we end up with a Series object
Step60: The stack and unstack methods let you select the level to stack/unstack. You can even stack/unstack multiple levels at once
Step61: Most methods return modified copies
Step62: The loc attribute lets you access rows instead of columns. The result is Series object in which the DataFrame's column names are mapped to row index labels
Step63: You can also access rows by integer location using the iloc attribute
Step64: You can also get a slice of rows, and this returns a DataFrame object
Step65: Finally, you can pass a boolean array to get the matching rows
Step66: This is most useful when combined with boolean expressions
Step67: Adding and removing columns
Step68: When you add a new colum, it must have the same number of rows. Missing rows are filled with NaN, and extra rows are ignored
Step69: When adding a new column, it is added at the end (on the right) by default. You can also insert a column anywhere else using the insert method
Step70: Assigning new columns
Step71: Note that you cannot access columns created within the same assignment
Step72: The solution is to split this assignment in two consecutive assignments
Step73: Having to create a temporary variable d6 is not very convenient. You may want to just chain the assigment calls, but it does not work because the people object is not actually modified by the first assignment
Step74: But fear not, there is a simple solution. You can pass a function to the assign method (typically a lambda function), and this function will be called with the DataFrame as a parameter
Step75: Problem solved!
Step76: Assignment expressions are also supported, and contrary to the assign method, this does not create a copy of the DataFrame, instead it directly modifies it
Step77: You can use a local or global variable in an expression by prefixing it with '@'
Step78: Querying a DataFrame
Step79: Sorting a DataFrame
Step80: Note that sort_index returned a sorted copy of the DataFrame. To modify people directly, we can set the inplace argument to True. Also, we can sort the columns instead of the rows by setting axis=1
Step81: To sort the DataFrame by the values instead of the labels, we can use sort_values and specify the column to sort by
Step82: Plotting a DataFrame
Step83: You can pass extra arguments supported by matplotlib's functions. For example, we can create scatterplot and pass it a list of sizes using the s argument of matplotlib's scatter function
Step84: Again, there are way too many options to list here
Step85: You can apply NumPy mathematical functions on a DataFrame
Step86: Similarly, adding a single value to a DataFrame will add that value to all elements in the DataFrame. This is called broadcasting
Step87: Of course, the same is true for all other binary operations, including arithmetic (*,/,**...) and conditional (>, ==...) operations
Step88: Aggregation operations, such as computing the max, the sum or the mean of a DataFrame, apply to each column, and you get back a Series object
Step89: The all method is also an aggregation operation
Step90: Most of these functions take an optional axis parameter which lets you specify along which axis of the DataFrame you want the operation executed. The default is axis=0, meaning that the operation is executed vertically (on each column). You can set axis=1 to execute the operation horizontally (on each row). For example, let's find out which students had all grades greater than 5
Step91: The any method returns True if any value is True. Let's see who got at least one grade 10
Step92: If you add a Series object to a DataFrame (or execute any other binary operation), pandas attempts to broadcast the operation to all rows in the DataFrame. This only works if the Series has the same size as the DataFrames rows. For example, let's substract the mean of the DataFrame (a Series object) from the DataFrame
Step93: We substracted 7.75 from all September grades, 8.75 from October grades and 7.50 from November grades. It is equivalent to substracting this DataFrame
Step94: If you want to substract the global mean from every grade, here is one way to do it
Step95: Automatic alignment
Step96: Looks like the addition worked in some cases but way too many elements are now empty. That's because when aligning the DataFrames, some columns and rows were only present on one side, and thus they were considered missing on the other side (NaN). Then adding NaN to a number results in NaN, hence the result.
Step97: It's a bit unfair that we're setting grades to zero in September, though. Perhaps we should decide that missing grades are missing grades, but missing bonus points should be replaced by zeros
Step98: That's much better
Step99: Now let's call the interpolate method. By default, it interpolates vertically (axis=0), so let's tell it to interpolate horizontally (axis=1).
Step100: Bob had 0 bonus points in October, and 2 in December. When we interpolate for November, we get the mean
Step101: Great, now we have reasonable bonus points everywhere. Let's find out the final grades
Step102: It is slightly annoying that the September column ends up on the right. This is because the DataFrames we are adding do not have the exact same columns (the grades DataFrame is missing the "dec" column), so to make things predictable, pandas orders the final columns alphabetically. To fix this, we can simply add the missing column before adding
Step103: There's not much we can do about December and Colin
Step104: Now let's remove columns that are full of NaNs by setting the axis argument to 1
Step105: Aggregating with groupby
Step106: Now let's group data in this DataFrame by hobby
Step107: We are ready to compute the average grade per hobby
Step108: That was easy! Note that the NaN values have simply been skipped when computing the means.
Step109: Now we can call the pivot_table function for this DataFrame, asking to group by the name column. By default, pivot_table computes the mean of each numeric column
Step110: We can change the aggregation function by setting the aggfunc attribute, and we can also specify the list of columns whose values will be aggregated
Step111: We can also specify the columns to aggregate over horizontally, and request the grand totals for each row and column by setting margins=True
Step112: Finally, we can specify multiple index or column names, and pandas will create multi-level indices
Step113: Overview functions
Step114: The head method returns the top 5 rows
Step115: Of course there's also a tail function to view the bottom 5 rows. You can pass the number of rows you want
Step116: The info method prints out a summary of each columns contents
Step117: Finally, the describe method gives a nice overview of the main aggregated values over each column
Step118: Saving & loading
Step119: Saving
Step120: Done! Let's take a peek at what was saved
Step121: Note that the index is saved as the first column (with no name) in a CSV file, as <th> tags in HTML and as keys in JSON.
Step122: Loading
Step123: As you might guess, there are similar read_json, read_html, read_excel functions as well. We can also read data straight from the Internet. For example, let's load all U.S. cities from simplemaps.com
Step124: There are more options available, in particular regarding datetime format. Check out the documentation for more details.
Step125: Now let's join these DataFrames using the merge function
Step126: Note that both DataFrames have a column named state, so in the result they got renamed to state_x and state_y.
Step127: Of course LEFT OUTER JOIN is also available by setting how="left"
Step128: If the key to join on is actually in one (or both) DataFrame's index, you must use left_index=True and/or right_index=True. If the key column names differ, you must use left_on and right_on. For example
Step129: Concatenation
Step130: Note that this operation aligned the data horizontally (by columns) but not vertically (by rows). In this example, we end up with multiple rows having the same index (eg. 3). Pandas handles this rather gracefully
Step131: Or you can tell pandas to just ignore the index
Step132: Notice that when a column does not exist in a DataFrame, it acts as if it was filled with NaN values. If we set join="inner", then only columns that exist in both DataFrames are returned
Step133: You can concatenate DataFrames horizontally instead of vertically by setting axis=1
Step134: In this case it really does not make much sense because the indices do not align well (eg. Cleveland and San Francisco end up on the same row, because they shared the index label 3). So let's reindex the DataFrames by city name before concatenating
Step135: This looks a lot like a FULL OUTER JOIN, except that the state columns were not renamed to state_x and state_y, and the city column is now the index.
Step136: As always in pandas, the append method does not actually modify city_loc
Step137: Right now the eco_code column is full of apparently meaningless codes. Let's fix that. First, we will create a new categorical column based on the eco_codes
Step138: Now we can give each category a meaningful name
Step139: Note that categorical values are sorted according to their categorical order, not their alphabetical order
|
7,874
|
<ASSISTANT_TASK:>
Python Code:
import math
print("pi = %1.15f" %math.pi)
print("pi = %1.15e" %math.pi)
import numpy as np
import scipy as scp
print(" numpy pi = %1.15f" %np.pi)
print(" scipy pi = %1.15f" %scp.pi)
print("1+2 = ", 1+2)
print("1.0+2 = ", 1.0+2)
print("1.0+2.0 = ", 1.0+2.0)
print("4/2 = ", 4/2)
print("4//2 = ", 4//2)
print("8//3 = ", 8//3)
print("8/3 = ", 8/3)
print("Integer of 8/3 = ", int(8/3))
print("(2.5,3.0) =", 2.5+3.0j)
print("2^4 = ", 2**4)
print("2.0^4 = ", 2.**4)
print("2.0^4 = ",pow(2.0,4))
a = 10.
a *= 9.
print("a = 10. then a *= 9.=",a)
print("e^1=",math.exp(1.))
print("e^(-infinity)=",math.exp(-math.inf))
print("log(e)=", math.log(math.e))
print("log_10(1000.)=",math.log10(1000.))
a = math.cos(math.pi/4.)
print("a = cos(pi/4) = %1.8f" %a)
print("arcos(a)*4./pi = ",math.acos(a)*4./math.pi)
print("1 equal 2:",1==2)
print("1 non-equal 2:",1!=2)
print("1 > 2:",1>2)
print("1 < 2:",1<2)
print("10.==sqrt(100.):",10.==math.sqrt(100.))
a = 0
if a > 0:
a -= 1
elif a == 0:
a = 1
print(a)
a = 4
b = 0
if a > 0 and a < 4:
a += 1
b = 3
else:
a = 0
b = 1
print(a,b)
a = 4
b = 0
if a > 0 or a < 4:
a += 1
b = 3
else:
a = 0
b = 1
print(a,b)
for i in range(0,5,1):
print(i)
for i in range(5):
print(i)
for i in range(3,5):
print(i)
for i in range(5,2,-1):
print(i)
a = 0
while a <=5 :
print(a)
a += 1
print("final a=",a)
import numpy as np
n = 10
a = np.zeros(n)
print("a = ",a)
m = 3
b = np.ones((m,n))
print("b = ",b)
c = np.array([1, 2, 3, 4])
print("c = ",c)
d = np.array([[1, 2], [3, 4]], dtype = np.complex)
print("d = ",d)
a = np.ones((2,2), dtype = 'float64')
for j in range(2):
for i in range(2):
a[i,j] *= np.float64(math.pi/(i+j+1))
print(a)
print("a[0,0] =",a[0,0])
n = 10
a = np.zeros(n, dtype = np.float)
for i in range(n):
a[i] = 2.**i
print("a = ",a)
b = a
b[n-1] -= 500.
print("a = ",a)
print("b = ",b)
a = 1.
b = a
b += 1.
print("a=",a)
print("b=",b)
n = 10
a = np.zeros(n)
for i in range(n):
a[i] = 2.**i
b = np.copy(a)
b[n-1] -= 500.
print("a = ",a)
print("b = ",b)
n = 10
a = np.zeros(n)
for i in range(n):
a[i] = 2.**i
b = a * 2.
print("a = ",a)
print("b = ",b)
n = 10
a = np.zeros(n)
for i in range(n):
a[i] = 2.**i
b = np.copy(a)
b *= 2.
print("a = ",a)
print("b = ",b)
n = 10
a = np.zeros(n)
for i in range(n):
a[i] = 2.**i
b = np.zeros(n)
b = a
b *= 2.
print("a = ",a)
print("b = ",b)
n = 4
a = np.zeros(n, dtype = np.float64)
for i in range(n):
a[i] = 2.**i
print("a=",a)
b = np.copy(a)
b += 1.
print("b=a+1=",b)
b = np.zeros(n, dtype = np.float64)
for i in range(n):
b[i] = np.float64(i)
print("a=",a)
print("b=",b)
print("a*b=",a*b)
print("a.b=",np.dot(a,b))
print("exp(-a)=",np.exp(-a))
print("exp(-a)=",math.exp(-a))
%matplotlib inline
# plots graphs within the notebook
%config InlineBackend.figure_format='svg' # not sure what this does, may be default images to svg format
from IPython.display import Image
from IPython.display import clear_output
from IPython.core.display import HTML
def header(text):
    """Wrap *text* in an <h4> heading tag and return the raw HTML string."""
    return ''.join(['<h4>', str(text), '</h4>'])
def box(text):
    """Render *text* inside a dotted-border <div> as an IPython HTML object."""
    pieces = ['<div style="border:1px dotted black;padding:2em;">', str(text), '</div>']
    return HTML(''.join(pieces))
def nobox(text):
    """Wrap *text* in a plain <p> paragraph as an IPython HTML object."""
    return HTML(''.join(['<p>', str(text), '</p>']))
def addContent(raw_html):
    """Append *raw_html* to the module-level htmlContent accumulator string."""
    # NOTE(review): htmlContent must be initialised elsewhere before the
    # first call, otherwise this raises NameError.
    global htmlContent
    htmlContent = htmlContent + raw_html
class PDF(object):
    """Display helper that renders a PDF file inline in Jupyter.

    HTML front-ends get an <iframe>; LaTeX export gets an includegraphics
    command referencing the same file.
    """

    def __init__(self, pdf, size=(200, 200)):
        # Path/URL of the document and the (width, height) of the iframe.
        self.pdf = pdf
        self.size = size

    def _repr_html_(self):
        template = '<iframe src={0} width={1[0]} height={1[1]}></iframe>'
        return template.format(self.pdf, self.size)

    def _repr_latex_(self):
        # Doubled braces emit the literal {} around the file name.
        return r'\includegraphics[width=1.0\textwidth]{{{0}}}'.format(self.pdf)
PDF('figures/indexing.pdf',size=(800,600))
n = 6
# `np.float` alias removed in NumPy 1.24 -- use the builtin `float` instead.
a = np.zeros(6, dtype=float)
for i in range(n):
    a[i] = 2**i
print("a[0:n] = ",a[0:n])
print("a[:] = ",a[:])
print("a[1:] = ",a[1:])
print("a[1:n] = ",a[1:n])
print("a[0:n] = ",a[0:n])
print("a[0:n-1] = ",a[0:n-1])
print("a[0:-1] = ",a[0:-1])
print("a[:-1] = ",a[:-1])
print("a[2:] = ",a[2:])
print("a[:-3]= ",a[:-3])
b = np.zeros(n,dtype=np.float)
b[:-1] = a[1:] - a[:-1]
print("a[1:] =",a[1:])
print("-")
print("a[:-1] =",a[:-1])
print(" =",b[:-1])
n = 100000
mu = 0.
sigma = 1.
a = np.random.normal(loc = mu, scale = sigma, size = n)
print("a[:-(n-10)] = ", a[:-(n-10)])
print("a[0:10] = ", a[0:10])
%matplotlib inline
# plots graphs within the notebook
%config InlineBackend.figure_format='svg' # not sure what this does, may be default images to svg format
from IPython.display import display,Image, Latex
from __future__ import division
from sympy.interactive import printing
printing.init_printing(use_latex='mathjax')
import time
from IPython.display import display,Image, Latex
from IPython.display import clear_output
#import SchemDraw as schem
#import SchemDraw.elements as e
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy.constants as sc
import sympy as sym
from IPython.core.display import HTML
def header(text):
    """Return *text* wrapped in an <h4> tag as a raw HTML string."""
    raw_html = '<h4>' + str(text) + '</h4>'
    return raw_html
def box(text):
    """Return *text* inside a dotted-border <div>, wrapped as an IPython HTML object."""
    raw_html = '<div style="border:1px dotted black;padding:2em;">'+str(text)+'</div>'
    return HTML(raw_html)
def nobox(text):
    """Return *text* wrapped in a plain <p> tag as an IPython HTML object."""
    raw_html = '<p>'+str(text)+'</p>'
    return HTML(raw_html)
def addContent(raw_html):
    """Append *raw_html* to the module-level htmlContent accumulator."""
    # NOTE(review): htmlContent must be initialised elsewhere before the
    # first call, otherwise this raises NameError.
    global htmlContent
    htmlContent += raw_html
class PDF(object):
    """Display helper: renders a PDF inline via <iframe> in HTML front-ends
    and as an includegraphics command when exported to LaTeX."""
    def __init__(self, pdf, size=(200,200)):
        # pdf: path or URL of the document; size: (width, height) in pixels.
        self.pdf = pdf
        self.size = size
    def _repr_html_(self):
        # {1[0]}/{1[1]} index into the (width, height) tuple.
        return '<iframe src={0} width={1[0]} height={1[1]}></iframe>'.format(self.pdf, self.size)
    def _repr_latex_(self):
        # Doubled braces emit the literal {} around the file name in LaTeX.
        return r'\includegraphics[width=1.0\textwidth]{{{0}}}'.format(self.pdf)
class ListTable(list):
    """Overridden list class which takes a 2-dimensional list of
    the form [[1,2,3],[4,5,6]], and renders an HTML Table in
    IPython Notebook.
    """
    # NOTE(review): the original docstring had lost its triple quotes in this
    # dump (a syntax error), and "</table>" was appended inside the row loop,
    # closing the table once per row; both are fixed here.

    def _repr_html_(self):
        # Build the markup as a list of fragments and join once at the end.
        html = ["<table>"]
        for row in self:
            html.append("<tr>")
            for col in row:
                html.append("<td>{0}</td>".format(col))
            html.append("</tr>")
        html.append("</table>")
        return ''.join(html)
font = {'family' : 'serif',
#'color' : 'black',
'weight' : 'normal',
'size' : 12,
}
fontlabel = {'family' : 'serif',
#'color' : 'black',
'weight' : 'normal',
'size' : 16,
}
from matplotlib.ticker import FormatStrFormatter
plt.rc('font', **font)
import matplotlib.pyplot as plt

# `normed` was removed from plt.hist in Matplotlib 3.1; `density=True` is the
# replacement and normalises the histogram so it integrates to one.
count, bins, ignored = plt.hist(a, 50, density=True)
# Overlay the analytical normal pdf for visual comparison with the samples.
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * \
         np.exp( - (bins - mu)**2 / (2 * sigma**2) ),\
         linewidth=2, color='r')
plt.xlabel(r"a", fontdict = fontlabel)
plt.ylabel(r"pdf", fontdict = fontlabel)
plt.show()
n = 100
mu = 0.
sigma = 0.2
x = np.linspace(0,1.,n)
a = np.sin(2.*math.pi*x)
plt.plot(x,a, lw = 2)
plt.xlabel(r"x", fontdict = fontlabel)
plt.ylabel(r"a(x)", fontdict = fontlabel)
plt.show()
a_noisy = np.copy(a)
n_noise = 4
k_noise = 50
noise_amplitude = np.random.normal(loc = mu, scale = sigma, size = n_noise)
noise_phase = 2*math.pi*np.random.normal(loc = mu, scale = 1.0, size = n_noise)
for i in range(n_noise):
a_noisy += noise_amplitude[i]*np.sin(2*(k_noise+i)*math.pi*x + noise_phase[i])
plt.plot(x,a_noisy, lw = 2)
plt.xlabel(r"x", fontdict = fontlabel)
plt.ylabel(r"a(x)", fontdict = fontlabel)
plt.show()
print(noise_amplitude)
print(noise_phase)
a_filter = np.zeros(n)
a_filter[1:-1] = (a_noisy[0:-2] + 2. * a_noisy[1:-1] + a_noisy[2:])/4.
a_filter[0] = (a_noisy[n-2] + 2. * a_noisy[0] + a_noisy[1])/4.
a_filter[n-1] = a_filter[0]
plt.plot(x,a_noisy, linestyle="solid", lw = 1, label = r"noisy signal")
plt.plot(x,a_filter, color = "red",linestyle="solid", lw = 2, label = r"filtered signal")
plt.plot(x,a, color = "green", linestyle="None", marker = "o", markeredgecolor = "green", markeredgewidth = 1,\
markerfacecolor = "None", label = r"original signal")
plt.legend(loc="lower left", bbox_to_anchor=[0, 1],
ncol=2, shadow=True, fancybox=True)
plt.xlabel(r"$x$", fontdict = fontlabel)
plt.ylabel(r"$a(x)$", fontdict = fontlabel)
plt.show()
plt.semilogy(x,np.abs(a_noisy - a_filter), lw = 2, label=r"$\vert a_{noisy}-a_{filter}\vert$")
plt.semilogy(x,np.abs(a - a_filter), color = "red", lw = 2, label=r"$\vert a-a_{filter}\vert$")
plt.legend(loc="lower left", bbox_to_anchor=[0, 1],
ncol=2, shadow=True, fancybox=True)
plt.xlabel(r"$x$", fontdict = fontlabel)
plt.ylabel(r"difference", fontdict = fontlabel)
%%timeit
a_filter = np.zeros(n)
for i in range(1,n-1):
a_filter[i] = (a_noisy[i-1] + 2. * a_noisy[i] + a_noisy[i+1])/4.
a_filter[0] = (a_noisy[n-2] + 2. * a_noisy[0] + a_noisy[1])/4.
a_filter[n-1] = a_filter[0]
%%timeit
a_filter = np.zeros(n)
a_filter[1:-1] = (a_noisy[0:-2] + 2. * a_noisy[1:-1] + a_noisy[2:])/4.
a_filter[0] = (a_noisy[n-2] + 2. * a_noisy[0] + a_noisy[1])/4.
a_filter[n-1] = a_filter[0]
def explicit_filter(a):
    """Smooth the periodic signal *a* with a (1, 2, 1)/4 three-point stencil."""
    n = len(a)
    smoothed = np.zeros(n)
    # Interior points: weighted average of each point and its two neighbours.
    smoothed[1:-1] = 0.25 * (a[:-2] + 2. * a[1:-1] + a[2:])
    # The first point wraps around to a[n-2]; the signal is assumed periodic
    # with a[0] == a[n-1], so the last point just mirrors the first.
    smoothed[0] = 0.25 * (a[n - 2] + 2. * a[0] + a[1])
    smoothed[n - 1] = smoothed[0]
    return smoothed
a_filter = explicit_filter(a_noisy)
plt.semilogy(x,np.abs(a_noisy - a_filter), lw = 2, label=r"$\vert a_{noisy}-a_{filter}\vert$")
plt.semilogy(x,np.abs(a - a_filter), color = "red", lw = 2, label=r"$\vert a-a_{filter}\vert$")
plt.legend(loc="lower left", bbox_to_anchor=[0, 1],
ncol=2, shadow=True, fancybox=True)
plt.xlabel(r"$x$", fontdict = fontlabel)
plt.ylabel(r"difference", fontdict = fontlabel)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2>1.2 Basic operators <FONT FACE="courier" style="color
Step2: <h3>1.3 Mathematical functions</h3>
Step3: <h3>1.4 Booleans</h3>
Step4: Let's take a pause here to discuss <FONT FACE="courier" style="color
Step5: <h3>1.5 Loops</h3>
Step6: <h2>2 Matrices</h2>
Step7: <h3>2.3 The problem with arrays
Step8: Now, I want to modify the last element <FONT FACE="courier" style="color
Step9: Both my arrays have been changed!!! The same is not true for simple variables
Step10: The fix for arrays is to use <FONT FACE="courier" style="color
Step11: Note that when you perform an operation on <b>all</b> the elements of your array, the problem of linking your old array and new array disappears
Step12: which is equivalent to
Step13: but this is bad
Step14: <h3>2.4 Array operations</h3>
Step15: Note that you need to use the <FONT FACE="courier" style="color
Step16: <p class='alert alert-danger'>
Step18: Using indexing and slicing is much more computationally efficient than using a for loop; here is a demonstration. The next cell creates a large array of random numbers drawn from a normal distribution.
|
7,875
|
<ASSISTANT_TASK:>
Python Code:
# just some basic setup for the purpose of this demo:
%matplotlib inline
from IPython.display import display
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
#alternative you can use bh_sne:
#from tsne import bh_sne
# generate a set of 4900 random 3D points
totalDataPoints = 2000
dataPoints = np.random.uniform(low=0.0, high=1.0, size=(totalDataPoints,3))
# create a t-sne embedding in 2D (we have to clone the array since bh_sne changes the original data)
# The bh_sne repository can be found here: https://github.com/danielfrg/tsne
#xy = bh_sne(dataPoints.copy())
xy = TSNE().fit_transform(dataPoints)
fig = plt.figure(figsize=(10.0,10.0))
ax = fig.add_subplot(1, 1, 1)
ax.set_facecolor('black')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.autoscale_view(True,True,True)
ax.invert_yaxis()
ax.scatter(xy[:,0],xy[:,1], c = dataPoints, edgecolors='none',marker='s',s=7.5)
plt.show()
from rasterfairy import coonswarp
warped_xy = coonswarp.rectifyCloud(xy,perimeterSubdivisionSteps=4,autoPerimeterOffset=False, paddingScale=1.05)
fig = plt.figure(figsize=(10.0,10.0))
ax = fig.add_subplot(1, 1, 1)
ax.set_facecolor('black')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.autoscale_view(True,True,True)
ax.invert_yaxis()
ax.scatter(warped_xy[:,0],warped_xy[:,1], c = dataPoints, edgecolors='none',marker='s',s=7.5)
plt.show()
sourceGrid = coonswarp.getCloudGrid( xy, perimeterSubdivisionSteps=2,autoPerimeterOffset=False, perimeterOffset=64,paddingScale=1.05, smoothing = 0.5)
targetGrid = coonswarp.getCircularGrid(xy)
warped_xy = coonswarp.warpCloud(xy,sourceGrid,targetGrid)
fig = plt.figure(figsize=(10.0,10.0))
ax = fig.add_subplot(1, 1, 1)
ax.set_facecolor('black')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.autoscale_view(True,True,True)
ax.invert_yaxis()
ax.scatter(xy[:,0],xy[:,1], c = dataPoints, edgecolors='none',s=7.5)
ax.scatter(sourceGrid[:,0],sourceGrid[:,1], c = 'w', edgecolors='none',s=4,alpha=0.6)
fig = plt.figure(figsize=(10.0,10.0))
ax = fig.add_subplot(1, 1, 1)
ax.set_facecolor('black')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.autoscale_view(True,True,True)
ax.invert_yaxis()
ax.scatter(warped_xy[:,0],warped_xy[:,1], c = dataPoints, edgecolors='none',s=7.5)
ax.scatter(targetGrid[:,0],targetGrid[:,1], c = 'w', edgecolors='none',s=4,alpha=0.6)
plt.show()
import io
from moviepy.editor import *
import PIL.Image as PImage
from IPython.display import clear_output, Image
from sklearn.cluster import KMeans
pointCount = len(xy)
gridPointCount = len(sourceGrid)
# some color reduction since the NeuQuant color reduction of images2gif does a bad job in this case:
kmeans = KMeans(n_clusters=min(255,pointCount), random_state=0).fit(dataPoints)
reducedColors = kmeans.cluster_centers_[kmeans.predict(dataPoints)]
width = 64
height = 64
images = []
# a bit of coordinate normalization so the coordinates are in the same range:
px_from = xy[:,0] - min(xy[:,0])
py_from = xy[:,1] - min(xy[:,1])
px_from /= max(px_from)
py_from /= max(py_from)
px_to = warped_xy[:,0] - min(warped_xy[:,0])
py_to = warped_xy[:,1] - min(warped_xy[:,1])
px_to /= max(px_to)
py_to /= max(py_to)
gx_from = sourceGrid[:,0] - min(sourceGrid[:,0])
gy_from = sourceGrid[:,1] - min(sourceGrid[:,1])
gx_from /= max(gx_from)
gy_from /= max(gy_from)
gx_to = targetGrid[:,0] - min(targetGrid[:,0])
gy_to = targetGrid[:,1] - min(targetGrid[:,1])
gx_to /= max(gx_to)
gy_to /= max(gy_to)
steps = 50
for i in range(steps*2):
t = 1.0- abs(1.0 * i / float(steps-1)-1.0) #tsteps[steps]
#t= 1.0 * i / (steps-1)
dx = []
dy = []
for j in range( pointCount ):
dx.append( (1.0-t)*px_from[j] + t*px_to[j])
dy.append( (1.0-t)*py_from[j] + t*py_to[j] )
gx = []
gy = []
for j in range( gridPointCount ):
gx.append( (1.0-t)*gx_from[j] + t*gx_to[j])
gy.append( (1.0-t)*gy_from[j] + t*gy_to[j] )
fig2 = plt.figure(figsize=(10.0,10.0))
ax = fig2.add_subplot(1, 1, 1)
ax.set_facecolor('black')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.scatter(dx,dy, c = reducedColors, edgecolors='none',s=10.0) #,marker='s'
ax.scatter(gx,gy, c = 'w', alpha=0.6, edgecolors='none',s=5.0) #,marker='s'
ax.axes.set_xlim([-0.01,1.01])
ax.axes.set_ylim([-0.01,1.01])
clear_output(wait=True)
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
images.append( np.array(PImage.open(buf)) )
#in case you want to rather export a series of PNGs:
#plt.savefig('clusters/cluster'+"%03d" % (i+steps,)+'.png')
plt.show()
clip = ImageSequenceClip(images,fps=25)
clip.write_gif("../warp_demo.gif")
Image(url="../warp_demo.gif")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The default method tries to warp the cloud towards a square shape. It does that by calculating the outer hull of the cloud and remapping it to a rectangle. The perimeterSubdivisionSteps parameter controls how concave the hull is. A value of 0 is the equivalent of a convex hull, the higher the value becomes the more the hull tries to "creep" into gaps. You have to experiment with this value depending on the shape of the hull - too high values will often result in most of the points being drawn to the edges. The autoPerimeterOffset will look for the grid that best matches the density distribution of the cloud, but will also often result in a rotation of the cloud. The width and height parameters control the resolution of the Coons Patch and will influence how continouus the mapping is. The default values of 64 should give good results in most cases. The paddingScale parameter controls the offset of the patch's hull from the cloud - the bigger the value the less pronounced the warping effect will be.
Step2: Here is another example that shows how to use a different target grid, in this case a circle, to warp the cloud. Also The source and target grids are shown in the rendering
Step3: Here is a snippet that renders an animation of the warp to an animated gif
|
7,876
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
# This is needed to display the images.
%matplotlib inline
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from utils import label_map_util
from utils import visualization_utils as vis_util
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# urllib.request.URLopener has been deprecated since Python 3.3; urlretrieve
# is the supported one-shot download helper with the same semantics here.
urllib.request.urlretrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for member in tar_file.getmembers():
    file_name = os.path.basename(member.name)
    # Only the frozen inference graph is needed from the archive.
    if 'frozen_inference_graph.pb' in file_name:
        tar_file.extract(member, os.getcwd())
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    """Convert a PIL image to an (height, width, 3) uint8 NumPy array."""
    width, height = image.size
    pixels = np.array(image.getdata()).astype(np.uint8)
    return pixels.reshape((height, width, 3))
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
cam = cv2.VideoCapture('/dev/video0')
cv2.namedWindow('camera')
cv2.moveWindow('camera', 50, 700)
while True:
image_np = cam.read()[1]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
cv2.imshow('camera', image_np)
if cv2.waitKey(25) & 0xff == ord('q'):
cv2.destroyAllWindows()
break
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Env setup
Step2: Object detection imports
Step3: Model preparation
Step4: Download Model
Step5: Load a (frozen) Tensorflow model into memory.
Step6: Loading label map
Step7: Helper code
Step8: Detection
|
7,877
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Romain Trachel <trachelr@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne import io, EvokedArray
from mne.datasets import sample
from mne.decoding import Vectorizer, get_coef
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.4
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(.5, 25, fir_design='firwin')
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
decim=2, baseline=None, preload=True)
labels = epochs.events[:, -1]
# get MEG and EEG data
meg_epochs = epochs.copy().pick_types(meg=True, eeg=False)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
clf = LogisticRegression(solver='lbfgs')
scaler = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = scaler.fit_transform(meg_data)
model.fit(X, labels)
# Extract and plot spatial filters and spatial patterns
for name, coef in (('patterns', model.patterns_), ('filters', model.filters_)):
# We fitted the linear model onto Z-scored data. To make the filters
# interpretable, we must reverse this normalization step
coef = scaler.inverse_transform([coef])[0]
# The data was vectorized to fit a single model across all time points and
# all channels. We thus reshape it:
coef = coef.reshape(len(meg_epochs.ch_names), -1)
# Plot
evoked = EvokedArray(coef, meg_epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(title='MEG %s' % name, time_unit='s')
X = epochs.pick_types(meg=False, eeg=True)
y = epochs.events[:, 2]
# Define a unique pipeline to sequentially:
clf = make_pipeline(
Vectorizer(), # 1) vectorize across time and channels
StandardScaler(), # 2) normalize features across trials
LinearModel(
LogisticRegression(solver='lbfgs'))) # 3) fits a logistic regression
clf.fit(X, y)
# Extract and plot patterns and filters
for name in ('patterns_', 'filters_'):
# The `inverse_transform` parameter will call this method on any estimator
# contained in the pipeline, in reverse order.
coef = get_coef(clf, name, inverse_transform=True)
evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(title='EEG %s' % name[:-1], time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Decoding in sensor space using a LogisticRegression classifier
Step3: Let's do the same on EEG data using a scikit-learn pipeline
|
7,878
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import sys
from sklearn import model_selection
import tensorflow as tf
!pip install git+https://github.com/google-research/tensorflow_constrained_optimization
import tensorflow_constrained_optimization as tfco
# We will divide the data into 25 minibatches and refer to them as 'queries'.
num_queries = 25
# List of column names in the dataset.
column_names = ["state", "county", "community", "communityname", "fold", "population", "householdsize", "racepctblack", "racePctWhite", "racePctAsian", "racePctHisp", "agePct12t21", "agePct12t29", "agePct16t24", "agePct65up", "numbUrban", "pctUrban", "medIncome", "pctWWage", "pctWFarmSelf", "pctWInvInc", "pctWSocSec", "pctWPubAsst", "pctWRetire", "medFamInc", "perCapInc", "whitePerCap", "blackPerCap", "indianPerCap", "AsianPerCap", "OtherPerCap", "HispPerCap", "NumUnderPov", "PctPopUnderPov", "PctLess9thGrade", "PctNotHSGrad", "PctBSorMore", "PctUnemployed", "PctEmploy", "PctEmplManu", "PctEmplProfServ", "PctOccupManu", "PctOccupMgmtProf", "MalePctDivorce", "MalePctNevMarr", "FemalePctDiv", "TotalPctDiv", "PersPerFam", "PctFam2Par", "PctKids2Par", "PctYoungKids2Par", "PctTeen2Par", "PctWorkMomYoungKids", "PctWorkMom", "NumIlleg", "PctIlleg", "NumImmig", "PctImmigRecent", "PctImmigRec5", "PctImmigRec8", "PctImmigRec10", "PctRecentImmig", "PctRecImmig5", "PctRecImmig8", "PctRecImmig10", "PctSpeakEnglOnly", "PctNotSpeakEnglWell", "PctLargHouseFam", "PctLargHouseOccup", "PersPerOccupHous", "PersPerOwnOccHous", "PersPerRentOccHous", "PctPersOwnOccup", "PctPersDenseHous", "PctHousLess3BR", "MedNumBR", "HousVacant", "PctHousOccup", "PctHousOwnOcc", "PctVacantBoarded", "PctVacMore6Mos", "MedYrHousBuilt", "PctHousNoPhone", "PctWOFullPlumb", "OwnOccLowQuart", "OwnOccMedVal", "OwnOccHiQuart", "RentLowQ", "RentMedian", "RentHighQ", "MedRent", "MedRentPctHousInc", "MedOwnCostPctInc", "MedOwnCostPctIncNoMtg", "NumInShelters", "NumStreet", "PctForeignBorn", "PctBornSameState", "PctSameHouse85", "PctSameCity85", "PctSameState85", "LemasSwornFT", "LemasSwFTPerPop", "LemasSwFTFieldOps", "LemasSwFTFieldPerPop", "LemasTotalReq", "LemasTotReqPerPop", "PolicReqPerOffic", "PolicPerPop", "RacialMatchCommPol", "PctPolicWhite", "PctPolicBlack", "PctPolicHisp", "PctPolicAsian", "PctPolicMinor", "OfficAssgnDrugUnits", "NumKindsDrugsSeiz", "PolicAveOTWorked", "LandArea", "PopDens", 
"PctUsePubTrans", "PolicCars", "PolicOperBudg", "LemasPctPolicOnPatr", "LemasGangUnitDeploy", "LemasPctOfficDrugUn", "PolicBudgPerPop", "ViolentCrimesPerPop"]
dataset_url = "http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data"
# Read dataset from the UCI web repository and assign column names.
data_df = pd.read_csv(dataset_url, sep=",", names=column_names,
na_values="?")
# Make sure that there are no missing values in the "ViolentCrimesPerPop" column.
assert(not data_df["ViolentCrimesPerPop"].isna().any())
# Real-valued label: "ViolentCrimesPerPop".
labels_df = data_df["ViolentCrimesPerPop"]
# Now that we have assigned binary labels,
# we drop the "ViolentCrimesPerPop" column from the data frame.
data_df.drop(columns="ViolentCrimesPerPop", inplace=True)
# Group features.
race_black_70_percentile = data_df["racepctblack"].quantile(q=0.7)
groups_df = (data_df["racepctblack"] >= race_black_70_percentile)
# Drop categorical features.
data_df.drop(columns=["state", "county", "community", "communityname", "fold"],
inplace=True)
# Handle missing features.
feature_names = data_df.columns
for feature_name in feature_names:
missing_rows = data_df[feature_name].isna() # Which rows have missing values?
if missing_rows.any(): # Check if at least one row has a missing value.
data_df[feature_name].fillna(0.0, inplace=True) # Fill NaN with 0.
missing_rows.rename(feature_name + "_is_missing", inplace=True)
data_df = data_df.join(missing_rows) # Append boolean "is_missing" feature.
labels = labels_df.values.astype(np.float32)
groups = groups_df.values.astype(np.float32)
features = data_df.values.astype(np.float32)
# Set random seed so that the results are reproducible.
np.random.seed(123456)
# We randomly divide the examples into 'num_queries' queries.
queries = np.random.randint(0, num_queries, size=features.shape[0])
# Train and test indices.
train_indices, test_indices = model_selection.train_test_split(
range(features.shape[0]), test_size=0.4)
# Train features, labels and protected groups.
train_set = {
'features': features[train_indices, :],
'labels': labels[train_indices],
'groups': groups[train_indices],
'queries': queries[train_indices],
'dimension': features.shape[-1],
'num_queries': num_queries
}
# Test features, labels and protected groups.
test_set = {
'features': features[test_indices, :],
'labels': labels[test_indices],
'groups': groups[test_indices],
'queries': queries[test_indices],
'dimension': features.shape[-1],
'num_queries': num_queries
}
def pair_high_low_docs(data):
    """Pair every higher-labeled row of *data* with every lower-labeled row.

    Returns a DataFrame whose columns carry "_pos" / "_neg" suffixes for the
    higher- and lower-labeled example of each pair respectively.
    """
    higher = data.copy()
    lower = data.copy()
    # A constant key turns the merge below into a full cross join.
    higher.insert(0, "merge_key", 0)
    lower.insert(0, "merge_key", 0)
    pairs = higher.merge(lower, on="merge_key", how="outer",
                         suffixes=("_pos", "_neg"))
    # Keep only pairs where the "_pos" document truly outranks the "_neg" one.
    keep = pairs.label_pos > pairs.label_neg
    pairs = pairs[keep]
    pairs.drop(columns=["merge_key"], inplace=True)
    return pairs
def convert_labeled_to_paired_data(data_dict, index=None):
  """Form positive/negative document pairs from each query of a labeled set.

  Converts the arrays in *data_dict* into a pandas DataFrame (restricted to
  a single query when *index* is given), calls pair_high_low_docs per query,
  and returns a dict of paired features/groups/queries alongside the
  original arrays.
  """
  features = data_dict['features']
  labels = data_dict['labels']
  groups = data_dict['groups']
  queries = data_dict['queries']
  if index is not None:
    # Restrict every array to the rows belonging to query `index`.
    data_df = pd.DataFrame(features[queries == index, :])
    data_df = data_df.assign(label=pd.DataFrame(labels[queries == index]))
    data_df = data_df.assign(group=pd.DataFrame(groups[queries == index]))
    data_df = data_df.assign(query_id=pd.DataFrame(queries[queries == index]))
  else:
    data_df = pd.DataFrame(features)
    data_df = data_df.assign(label=pd.DataFrame(labels))
    data_df = data_df.assign(group=pd.DataFrame(groups))
    data_df = data_df.assign(query_id=pd.DataFrame(queries))
  # Forms pairs of positive-negative docs separately within each query
  # (grouping by query_id), so documents are only compared to documents
  # from the same query. Otherwise pairs would mix across queries.
  data_pairs = data_df.groupby('query_id').apply(pair_high_low_docs)
  # Create groups ndarray: one (pos_group, neg_group) row per pair.
  pos_groups = data_pairs['group_pos'].values.reshape(-1, 1)
  neg_groups = data_pairs['group_neg'].values.reshape(-1, 1)
  group_pairs = np.concatenate((pos_groups, neg_groups), axis=1)
  # Create queries ndarray (the query each pair came from).
  query_pairs = data_pairs['query_id_pos'].values.reshape(-1,)
  # Create features ndarray of shape (num_pairs, 2, num_features), where
  # axis 1 holds the positive and negative document of each pair.
  feature_names = data_df.columns
  feature_names = feature_names.drop(['query_id', 'label'])
  feature_names = feature_names.drop(['group'])
  pos_features = data_pairs[[str(s) + '_pos' for s in feature_names]].values
  pos_features = pos_features.reshape(-1, 1, len(feature_names))
  neg_features = data_pairs[[str(s) + '_neg' for s in feature_names]].values
  neg_features = neg_features.reshape(-1, 1, len(feature_names))
  feature_pairs = np.concatenate((pos_features, neg_features), axis=1)
  # Paired data dict: paired arrays plus the original (unpaired) data.
  paired_data = {
      'feature_pairs': feature_pairs,
      'group_pairs': group_pairs,
      'query_pairs': query_pairs,
      'features': features,
      'labels': labels,
      'queries': queries,
      'dimension': data_dict['dimension'],
      'num_queries': data_dict['num_queries']
  }
  return paired_data
def get_mask(groups, pos_group, neg_group=None):
    """Boolean mask over document pairs selected by group membership.

    Selects pairs whose positive document (column 0 of *groups*) belongs to
    *pos_group*; when *neg_group* is given, the negative document (column 1)
    must belong to it as well.
    """
    selected = groups[:, 0] == pos_group
    if neg_group is not None:
        selected = selected & (groups[:, 1] == neg_group)
    return selected
def mean_squared_error(model, dataset):
    """Mean squared error of the Keras *model*'s scores on *dataset*."""
    predictions = model.predict(dataset['features'])
    residuals = predictions - dataset['labels']
    return np.mean(residuals ** 2)
def group_error_rate(model, dataset, pos_group, neg_group=None):
    """Pairwise error rate of *model* on the selected group's document pairs.

    Only pairs whose positive document is in *pos_group* (and, when given,
    whose negative document is in *neg_group*) are counted. An error is a
    pair whose positive document is scored below its negative document.
    """
    dim = dataset['dimension']
    pairs = dataset['feature_pairs']
    pos_scores = model.predict(pairs[:, 0, :].reshape(-1, dim))
    neg_scores = model.predict(pairs[:, 1, :].reshape(-1, dim))
    margins = pos_scores - neg_scores
    selected = get_mask(dataset['group_pairs'], pos_group, neg_group)
    margins = margins[selected > 0].reshape((-1))
    return np.mean(margins < 0)
def create_scoring_model(feature_pairs, features, dimension):
  """Build a linear Keras scoring model plus lazy prediction closures.

  Returns the model itself, a nullary function returning score differences
  on the document pairs, and a nullary function returning scores on the
  individual examples. Both closures re-read their data source on every
  call, which lets the constrained optimizer evaluate them per minibatch.
  """
  # Linear scoring model with no hidden layers.
  layers = []
  # Input layer takes `dimension` inputs.
  layers.append(tf.keras.Input(shape=(dimension,)))
  layers.append(tf.keras.layers.Dense(1))
  scoring_model = tf.keras.Sequential(layers)
  # Nullary function that applies the linear model to the paired features
  # and returns the tensor of prediction differences (pos score - neg score).
  def prediction_diffs():
    scores0 = scoring_model(feature_pairs()[:, 0, :].reshape(-1, dimension))
    scores1 = scoring_model(feature_pairs()[:, 1, :].reshape(-1, dimension))
    return scores0 - scores1
  # Nullary function that returns the predictions on individual examples.
  predictions = lambda: scoring_model(features())
  return scoring_model, prediction_diffs, predictions
def group_mask_fn(groups, pos_group, neg_group=None):
    """Return a nullary function that evaluates the flattened group mask.

    *groups* is itself a nullary function returning the current paired-group
    array, so the mask is recomputed lazily on every call.
    """
    def group_mask():
        return np.reshape(get_mask(groups(), pos_group, neg_group), (-1))
    return group_mask
def formulate_problem(
    feature_pairs, group_pairs, features, labels, dimension,
    constraint_groups=(), constraint_slack=None):
  """Formulates a constrained optimization problem for pairwise fairness.

  Optimizes the squared error for a linear model on the specified dataset,
  subject to pairwise fairness constraints specified by `constraint_groups`
  and `constraint_slack`.

  Args:
    feature_pairs: Nullary function returning paired features.
    group_pairs: Nullary function returning paired groups.
    features: Nullary function returning features.
    labels: Nullary function returning labels.
    dimension: Input dimension for the scoring model.
    constraint_groups: Iterable of tuples of the form
      ((pos_group0, neg_group0), (pos_group1, neg_group1)), specifying the
      group memberships for the document pairs to compare in the constraints.
      Defaults to an empty tuple (no constraints); an immutable default is
      used instead of the original mutable `[]`.
    constraint_slack: Slackness epsilon allowed in the constraints.

  Returns:
    A RateMinimizationProblem object, and a Keras scoring model.
  """
  # Create linear scoring model: we get back a Keras model plus nullary
  # functions returning predictions on pairs and on individual features.
  scoring_model, prediction_diffs, predictions = create_scoring_model(
      feature_pairs, features, dimension)
  # Context for the optimization objective.
  context = tfco.rate_context(prediction_diffs)
  # Squared loss objective.
  squared_loss = lambda: tf.reduce_mean((predictions() - labels()) ** 2)
  # Constraint set.
  constraint_set = []
  # Build a pair of one-sided constraints for each pair of group contexts.
  for ((pos_group0, neg_group0), (pos_group1, neg_group1)) in constraint_groups:
    # Context for group 0.
    group_mask0 = group_mask_fn(group_pairs, pos_group0, neg_group0)
    context_group0 = context.subset(group_mask0)
    # Context for group 1.
    group_mask1 = group_mask_fn(group_pairs, pos_group1, neg_group1)
    context_group1 = context.subset(group_mask1)
    # Constrain the two groups' pairwise error rates to lie within
    # `constraint_slack` of each other (both directions).
    constraint_set.append(
        tfco.negative_prediction_rate(context_group0) <= (
            tfco.negative_prediction_rate(context_group1) + constraint_slack))
    constraint_set.append(
        tfco.negative_prediction_rate(context_group1) <= (
            tfco.negative_prediction_rate(context_group0) + constraint_slack))
  # Formulate constrained minimization problem.
  problem = tfco.RateMinimizationProblem(
      tfco.wrap_rate(squared_loss), constraint_set)
  return problem, scoring_model
def train_model(train_set, params):
  # Trains the model with stochastic updates (one query per update).
  #
  # Args:
  #   train_set: Dictionary of "paired" training data.
  #   params: Dictionary of hyper-parameters for training.
  #
  # Returns:
  #   The snapshotted model with the best objective/violation trade-off.
  # Set random seed for reproducibility.
  random.seed(333333)
  np.random.seed(121212)
  tf.random.set_seed(212121)
  # Set up problem and model.
  if params['constrained']:
    # Constrained optimization: pick the group pairs to constrain based on
    # the requested fairness criterion.
    if params['constraint_type'] == 'marginal_equal_opportunity':
      constraint_groups = [((0, None), (1, None))]
    elif params['constraint_type'] == 'cross_group_equal_opportunity':
      constraint_groups = [((0, 1), (1, 0))]
    else:
      constraint_groups = [((0, 1), (1, 0)), ((0, 0), (1, 1))]
  else:
    # Unconstrained optimization.
    constraint_groups = []
  # Dictionary that will hold batch feature pairs, group pairs and labels for
  # the current batch. We include one query per batch.
  paired_batch = {}
  batch_index = 0 # Index of current query.
  # Data functions.
  # NOTE: these lambdas close over `paired_batch`, so re-assigning
  # `paired_batch` inside the training loop changes what the optimization
  # problem sees on every step.
  feature_pairs = lambda: paired_batch['feature_pairs']
  group_pairs = lambda: paired_batch['group_pairs']
  features = lambda: paired_batch['features']
  labels = lambda: paired_batch['labels']
  # Create scoring model and constrained optimization problem.
  problem, scoring_model = formulate_problem(
      feature_pairs, group_pairs, features, labels, train_set['dimension'],
      constraint_groups, params['constraint_slack'])
  # Create a loss function for the problem.
  lagrangian_loss, update_ops, multipliers_variables = (
      tfco.create_lagrangian_loss(problem, dual_scale=params['dual_scale']))
  # Create optimizer
  optimizer = tf.keras.optimizers.Adagrad(learning_rate=params['learning_rate'])
  # List of trainable variables (model weights, problem slacks, and the
  # Lagrange multipliers).
  var_list = (
      scoring_model.trainable_weights + problem.trainable_variables +
      [multipliers_variables])
  # Lists of objectives, group constraint violations, and snapshots of the
  # model during the course of training.
  objectives = []
  group_violations = []
  models = []
  feature_pair_batches = train_set['feature_pairs']
  group_pair_batches = train_set['group_pairs']
  query_pairs = train_set['query_pairs']
  feature_batches = train_set['features']
  label_batches = train_set['labels']
  queries = train_set['queries']
  print()
  # Run loops * iterations_per_loop full batch iterations.
  for ii in range(params['loops']):
    for jj in range(params['iterations_per_loop']):
      # Populate paired_batch dict with all pairs for current query. The batch
      # index is the same as the current query index.
      paired_batch = {
          'feature_pairs': feature_pair_batches[query_pairs == batch_index],
          'group_pairs': group_pair_batches[query_pairs == batch_index],
          'features': feature_batches[queries == batch_index],
          'labels': label_batches[queries == batch_index]
      }
      # Optimize loss.
      update_ops()
      optimizer.minimize(lagrangian_loss, var_list=var_list)
      # Update batch_index, and cycle back once last query is reached.
      batch_index = (batch_index + 1) % train_set['num_queries']
    # Snap shot current model.
    model_copy = tf.keras.models.clone_model(scoring_model)
    model_copy.set_weights(scoring_model.get_weights())
    models.append(model_copy)
    # Evaluate metrics for snapshotted model.
    error, gerr, group_viol = evaluate_results(
        scoring_model, train_set, params)
    objectives.append(error)
    group_violations.append(
        [x - params['constraint_slack'] for x in group_viol])
    sys.stdout.write(
        '\r Loop %d: error = %.3f, max constraint violation = %.3f' %
        (ii, objectives[-1], max(group_violations[-1])))
  print()
  if params['constrained']:
    # Find model iterate that trades-off between objective and group violations.
    best_index = tfco.find_best_candidate_index(
        np.array(objectives), np.array(group_violations), rank_objectives=False)
  else:
    # Find model iterate that achieves lowest objective.
    best_index = np.argmin(objectives)
  return models[best_index]
def evaluate_results(model, test_set, params):
  """Returns squared error, group error rates, and group-level constraint
  violations for `model` on `test_set`.

  The shape of the returned group error rates depends on
  params['constraint_type']: a flat [g0, g1] list for
  'marginal_equal_opportunity', and a 2x2 nested list indexed by
  (pos_group, neg_group) otherwise.
  """
  if params['constraint_type'] == 'marginal_equal_opportunity':
    g0_error = group_error_rate(model, test_set, 0)
    g1_error = group_error_rate(model, test_set, 1)
    group_violations = [g0_error - g1_error, g1_error - g0_error]
    return (mean_squared_error(model, test_set), [g0_error, g1_error],
            group_violations)
  else:
    g00_error = group_error_rate(model, test_set, 0, 0)
    g01_error = group_error_rate(model, test_set, 0, 1)
    # Bug fix: g10 must be computed with neg_group=0; the original passed
    # (1, 1) here, which silently duplicated g11_error.
    g10_error = group_error_rate(model, test_set, 1, 0)
    g11_error = group_error_rate(model, test_set, 1, 1)
    group_violations_offdiag = [g01_error - g10_error, g10_error - g01_error]
    group_violations_diag = [g00_error - g11_error, g11_error - g00_error]
    if params['constraint_type'] == 'cross_group_equal_opportunity':
      return (mean_squared_error(model, test_set),
              [[g00_error, g01_error], [g10_error, g11_error]],
              group_violations_offdiag)
    else:
      return (mean_squared_error(model, test_set),
              [[g00_error, g01_error], [g10_error, g11_error]],
              group_violations_offdiag + group_violations_diag)
def display_results(
    model, test_set, params, method, error_type, show_header=False):
  """Prints evaluation results for `model` on the given data.

  Args:
    model: Keras scoring model to evaluate.
    test_set: Dictionary of "paired" evaluation data.
    params: Dictionary of hyper-parameters; 'constraint_type' selects
      the table layout.
    method: Name of the training method, printed in the first column.
    error_type: Label for the evaluation split (e.g. 'Train' or 'Test').
    show_header: If True, print the table header line first.
  """
  error, group_error, diffs = evaluate_results(model, test_set, params)
  if params['constraint_type'] == 'marginal_equal_opportunity':
    if show_header:
      print('\nMethod\t\t\tError\t\tMSE\t\tGroup 0\t\tGroup 1\t\tDiff')
    print('%s\t%s\t\t%.3f\t\t%.3f\t\t%.3f\t\t%.3f' % (
        method, error_type, error, group_error[0], group_error[1],
        np.max(diffs)))
  elif params['constraint_type'] == 'cross_group_equal_opportunity':
    if show_header:
      print('\nMethod\t\t\tError\t\tMSE\t\tGroup 0/1\tGroup 1/0\tDiff')
    print('%s\t%s\t\t%.3f\t\t%.3f\t\t%.3f\t\t%.3f' % (
        method, error_type, error, group_error[0][1], group_error[1][0],
        np.max(diffs)))
  else:
    if show_header:
      # Bug fix: the original header contained '\t\MSE' (a stray backslash
      # printed before "MSE"); use '\t\t' to match the other branches.
      print('\nMethod\t\t\tError\t\tMSE\t\tGroup 0/1\tGroup 1/0\t' +
            'Group 0/0\tGroup 1/1\tDiff')
    print('%s\t%s\t\t%.3f\t\t%.3f\t\t%.3f\t\t%.3f\t\t%.3f\t\t%.3f' % (
        method, error_type, error, group_error[0][1], group_error[1][0],
        group_error[0][0], group_error[1][1], np.max(diffs)))
# Convert train/test set to paired data for later evaluation.
paired_train_set = convert_labeled_to_paired_data(train_set)
paired_test_set = convert_labeled_to_paired_data(test_set)
# Model hyper-parameters.
model_params = {
'loops': 10,
'iterations_per_loop': 250,
'learning_rate': 0.1,
'constraint_type': 'marginal_equal_opportunity',
'constraint_slack': 0.02,
'dual_scale': 1.0}
# Unconstrained optimization.
model_params['constrained'] = False
model_unc = train_model(paired_train_set, model_params)
display_results(model_unc, paired_train_set, model_params, 'Unconstrained ',
'Train', show_header=True)
display_results(model_unc, paired_test_set, model_params, 'Unconstrained ',
'Test')
# Constrained optimization with TFCO.
model_params['constrained'] = True
model_con = train_model(paired_train_set, model_params)
display_results(model_con, paired_train_set, model_params, 'Constrained ',
'Train', show_header=True)
display_results(model_con, paired_test_set, model_params, 'Constrained ',
'Test')
# Model hyper-parameters.
model_params = {
'loops': 10,
'iterations_per_loop': 250,
'learning_rate': 0.1,
'constraint_type': 'cross_group_equal_opportunity',
'constraint_slack': 0.02,
'dual_scale': 1.0}
# Unconstrained optimization.
model_params['constrained'] = False
model_unc = train_model(paired_train_set, model_params)
display_results(model_unc, paired_train_set, model_params, 'Unconstrained ',
'Train', show_header=True)
display_results(model_unc, paired_test_set, model_params, 'Unconstrained ',
'Test')
# Constrained optimization with TFCO.
model_params['constrained'] = True
model_con = train_model(paired_train_set, model_params)
display_results(model_con, paired_train_set, model_params, 'Constrained ',
'Train', show_header=True)
display_results(model_con, paired_test_set, model_params, 'Constrained ',
'Test')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pairwise Regression Fairness
Step2: Evaluation Metrics
Step3: We will also need functions to evaluate the pairwise error rates for a linear model.
Step4: Create Linear Model
Step5: Formulate Optimization Problem
Step6: Train Model
Step7: Summarize and Plot Results
Step8: Experimental Results
Step9: (1) Marginal Equal Opportunity
Step10: (2) Pairwise Equal Opportunity
|
7,879
|
<ASSISTANT_TASK:>
Python Code:
#!/usr/bin/env python
# -*- Python -*-
# Interactive walkthrough of the rtc_handle tools: connect to a naming
# service, grab two RT-Components (console-in / console-out), and exercise
# activation, direct port access, and data-port connectors.
import sys
import time
import subprocess
#
# set up user environment
# RtmToolsDir, MyRtcDir, etc.
#
# from set_env import * : you may provide a setup file like this
#
RtmToolsDir="../.."
MyRtcDir=".."
NS0="localhost:9876"
#
# import user tools (temporarily extend sys.path so rtc_handle resolves,
# then restore the original path)
#
sys.path.append(".")
save_path = sys.path[:]
sys.path.append(RtmToolsDir+'/rtc_handle')
from rtc_handle import *
# from rtc_handle_util import *
# sys.path.append(RtmToolsDir+'/embryonic_rtc')
# from EmbryonicRtc import *
sys.path = save_path
#
# import stub files
#
#import _GlobalIDL
#
# user program
#
#
# Connect to the naming service and enumerate the registered components.
env = RtmEnv(sys.argv,[NS0])
env.name_space[NS0].list_obj()
env.name_space[NS0].obj_list
env.name_space[NS0].rtc_handles
# Handles for the console-in and console-out example components.
cin=env.name_space[NS0].rtc_handles['cin0.rtc']
cout=env.name_space[NS0].rtc_handles['cout0.rtc']
# Exercise activation / deactivation of both components.
cout.activate()
cout.deactivate()
cout.activate()
cin.activate()
cin.deactivate()
cin.activate()
# Direct access to the InPort: open, push a value, close.
cout.inports
cout.inports['str_in'].open()
cout.inports['str_in'].write('abc')
cout.inports['str_in'].close()
# Direct access to the OutPort: open, read a value, close.
cin.outports
cin.outports['str_out'].open()
cin.outports['str_out'].con.prop_dict
cin.outports['str_out'].read()
cin.outports['str_out'].close()
# Connect the two data ports with default connector properties.
con = IOConnector([cin.outports['str_out'], cout.inports['str_in']])
con.def_prop
con.connect()
con.profile
con.prop_dict
con.disconnect()
# Same connection with an overridden buffer-length property.
con = IOConnector([cin.outports['str_out'], cout.inports['str_in']],
                  prop_dict={'dataport.inport.buffer.length': '8'})
con.prop_dict_req
con.connect()
con.prop_dict
con.disconnect()
# Conflict: `b` holds the connection, so a plain con.connect() fails;
# force=True resolves the conflict (presumably by tearing down the
# competing connection first — confirm with the rtc_handle docs).
b = IOConnector([cin.outports['str_out'], cout.inports['str_in']])
b.connect()
con.connect()
con.disconnect()
con.connect(force=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: setup user environment
Step2: import user tools
Step3: RtmEnv
Step4: NameSpace
Step5: RtcHandle
Step6: activate and deactivate rtcs
Step7: deactivation of some rtcs may fail.
Step8: but it usually recovers after the resource is available. please input something at the cin console. then,
Step9: direct access to Inports and Outports
Step10: get data from outport
Step11: IOConnector
Step12: default properties of the connector are as follows
Step13: connect ports
Step14: disconnect ports
Step15: change properties
Step16: conflict of connections
Step17: you can handle this situation by forcing connect/disconnect operation
|
7,880
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
# Load the pre-exported git "blame" log of the Linux kernel sources.
log = pd.read_csv("../dataset/linux_blame_log.csv.gz")
log.head()
log.info()
# Parse the raw timestamp strings into real datetime objects.
log['timestamp'] = pd.to_datetime(log['timestamp'])
log.head()
# Age of each line change: time elapsed between its timestamp and today.
log['age'] = pd.Timestamp('today') - log['timestamp']
log.head()
# Component = first two path segments joined by ':' (e.g. "drivers:net").
log['component'] = log['path'].str.split("/").str[:2].str.join(":")
log.head()
# Minimum age per component == time since the most recent change there.
age_per_component = log.groupby("component")['age'].min().sort_values()
age_per_component.head()
age_per_component.plot.bar(figsize=[15,5]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Was haben wir hier eigentlich?
Step2: <b>1</b> DataFrame (~ programmierbares Excel-Arbeitsblatt), <b>4</b> Series (= Spalten), <b>5665947</b> Rows (= Einträge)
Step3: IV. Anreichern
Step4: Wir ordnen jeder Zeilenänderung einer Komponente zu
Step5: <br/> <small><i>String-Operationen...die dauern. Gibt aber diverse Optimierungsmöglichkeiten!</i></small>
Step6: IV. Visualisieren
|
7,881
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from pandas import DataFrame
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_iris
# Fix the RNG and build a shuffled copy of the iris data.
seed = 2017
np.random.seed(seed)
data = load_iris()
idx = np.random.permutation(150)
X = data.data[idx]
y = data.target[idx]
from mlens.ensemble import SuperLearner
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# --- Build ---
# Passing a scoring function will create cv scores during fitting.
# The scorer should be a simple function accepting two vectors and returning a scalar.
ensemble = SuperLearner(scorer=accuracy_score, random_state=seed, verbose=2)
# Build the first layer
ensemble.add([RandomForestClassifier(random_state=seed), SVC()])
# Attach the final meta estimator
ensemble.add_meta(LogisticRegression())
# --- Use ---
# Fit ensemble on the first half of the data.
ensemble.fit(X[:75], y[:75])
# Predict on the held-out second half.
preds = ensemble.predict(X[75:])
print("Fit data:\n%r" % ensemble.data)
print("Prediction score: %.3f" % accuracy_score(preds, y[75:]))
# --- Multi-layer ensemble: two stacked layers before the meta estimator ---
ensemble = SuperLearner(scorer=accuracy_score, random_state=seed)
# Build the first layer
ensemble.add([RandomForestClassifier(random_state=seed), LogisticRegression()])
# Build the second layer
ensemble.add([LogisticRegression(), SVC()])
# Attach the final meta estimator
ensemble.add_meta(SVC())
ensemble.fit(X[:75], y[:75])
preds = ensemble.predict(X[75:])
print("Fit data:\n%r" % ensemble.data)
# --- Model selection with the Evaluator ---
from mlens.metrics import make_scorer
accuracy_scorer = make_scorer(accuracy_score, greater_is_better=True)
from mlens.model_selection import Evaluator
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from scipy.stats import randint
# Here we name the estimators ourselves
ests = [('gnb', GaussianNB()), ('knn', KNeighborsClassifier())]
# Now we map parameters to these.
# The gnb doesn't have any parameters so we can skip it.
pars = {'n_neighbors': randint(2, 20)}
params = {'knn': pars}
evaluator = Evaluator(accuracy_scorer, cv=10, random_state=seed, verbose=1)
evaluator.fit(X, y, ests, params, n_iter=10)
print("Score comparison with best params founds:\n\n%r" % evaluator.results)
# --- Preprocessing cases ---
from mlens.preprocessing import Subset
from sklearn.preprocessing import StandardScaler
# Map preprocessing cases through a dictionary
preprocess_cases = {'none': [],
                    'sc': [StandardScaler()],
                    'sub': [Subset([0, 1])]
                    }
evaluator.fit(X, y, preprocessing=preprocess_cases)
evaluator.fit(X, y, ests, params, n_iter=10)
print("\nComparison across preprocessing pipelines:\n\n%r" % evaluator.results)
# --- Case-specific parameter distributions ('case.estimator' keys) ---
pars_1 = {'n_neighbors': randint(20, 30)}
pars_2 = {'n_neighbors': randint(2, 10)}
params = {'sc.knn': pars_1,
          'none.knn': pars_2,
          'sub.knn': pars_2}
# We can map different estimators to different cases
ests_1 = [('gnb', GaussianNB()), ('knn', KNeighborsClassifier())]
ests_2 = [('knn', KNeighborsClassifier())]
estimators = {'sc': ests_1,
              'none': ests_2,
              'sub': ests_1}
evaluator.fit(X, y, estimators, params, n_iter=10)
print("\nComparison with different parameter dists:\n\n%r" % evaluator.results)
# --- Visualization helpers ---
import matplotlib.pyplot as plt
from mlens.visualization import exp_var_plot
from sklearn.decomposition import PCA
# Explained-variance plot of a PCA decomposition.
exp_var_plot(X, PCA(), marker='s', where='post')
from mlens.visualization import pca_plot
from sklearn.decomposition import PCA
# Scatter of the data projected onto the first two principal components.
pca_plot(X, PCA(n_components=2), y=y)
from mlens.visualization import pca_comp_plot
pca_comp_plot(X, y)
plt.show()
from mlens.visualization import corrmat
# Generate some different predictions to correlate
params = [0.1, 0.3, 1.0, 3.0, 10, 30]
preds = np.zeros((150, 6))
for i, c in enumerate(params):
    preds[:, i] = LogisticRegression(C=c).fit(X, y).predict(X)
corr = DataFrame(preds, columns=['C=%.1f' % i for i in params]).corr()
corrmat(corr)
plt.show()
from mlens.visualization import clustered_corrmap
from sklearn.cluster import KMeans
# Duplicate the features; the heatmap re-orders them by cluster.
Z = DataFrame(X, columns=['f_%i' % i for i in range(1, 5)])
corr = Z.join(Z, lsuffix='L', rsuffix='R').corr()
clustered_corrmap(corr, KMeans())
plt.show()
from mlens.visualization import corr_X_y
Z = DataFrame(X, columns=['feature_%i' % i for i in range(1, 5)])
corr_X_y(Z, y, 2, no_ticks=False)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ensemble guide
Step2: To check the performance of estimator in the layers, call the data
Step3: To round off, let's see how the ensemble as a whole fared.
Step4: Multi-layer ensembles
Step5: We now fit this ensemble in the same manner as before
Step6: .. py
Step7: .. currentmodule
Step8: We can now run an evaluation over these estimators and parameter distributions
Step9: The full history of the evaluation can be found in cv_results. To compare
Step10: Preprocessing
Step11: The fit methods determines automatically whether there is any preprocessing
Step12: Model Selection across preprocessing pipelines
Step13: You can also map different estimators to different preprocessing folds, and
Step14: .. py
Step15: Principal Components Analysis plot
Step16: Principal Components Comparison plot
Step17: Correlation matrix plot
Step18: Clustered correlation heatmap plot
Step19: We duplicate all features, note that the heatmap orders features
Step20: Input-Output correlations
|
7,882
|
<ASSISTANT_TASK:>
Python Code:
from pymongo import MongoClient
import time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
import datetime as dt
import random as rnd
import warnings
import datetime as dt
import csv
%matplotlib inline
# Silence noisy library warnings for the rest of the notebook.
warnings.filterwarnings(action="ignore")
# Read-only connection to the National Bridge Inventory database.
Client = MongoClient("mongodb://bridges:readonly@nbi-mongo.admin/bridge")
db = Client.bridge
collection = db["bridges"]
# Survey years 1992-2016 and the state codes to pull; '31' is Nebraska.
years = list(range(1992, 2017))
states = ['31']
# Pull one projected record per bridge inspection and collect them all.
masterdec = []
for yr in years:
    for state in states:
        pipeline = [
            {"$match": {"$and": [{"year": yr}, {"stateCode": state}]}},
            {"$project": {"_id": 0,
                          "year": 1,
                          "stateCode": 1,
                          "structureNumber": 1,
                          "yearBuilt": 1,
                          "yearReconstructed": 1,
                          "deck": 1,            # rating of deck
                          "substructure": 1,    # rating of substructure
                          "superstructure": 1,  # rating of superstructure
                          }}]
        for record in collection.aggregate(pipeline):
            masterdec.append(record)
conditionRatings = pd.DataFrame(masterdec)
conditionRatings.head()
# Drop rows whose deck / substructure / superstructure rating is non-numeric.
before = len(conditionRatings)
print("Total Records before filteration: ",len(conditionRatings))
for component in ('deck', 'substructure', 'superstructure'):
    conditionRatings = conditionRatings.loc[~conditionRatings[component].isin(['N','NA'])]
after = len(conditionRatings)
print("Total Records after filteration: ",len(conditionRatings))
print("Difference: ", before - after)
conditionRatings['yearReconstructed'].unique()
# Bridges with a real (non-zero, non-missing) reconstruction year.
yrCR = conditionRatings.loc[~conditionRatings['yearReconstructed'].isin([0,-1])]
len(yrCR)
yrCR = yrCR.groupby('yearReconstructed').agg(["count"])
column = ['deck','stateCode','structureNumber','substructure','superstructure','year','yearBuilt']
yrCR.columns = column
yrCR.sort_values(by='deck',ascending = False)['structureNumber'][:15]
# Per-bridge chronological rating histories for each structural component.
deckCondition = {k: g["deck"].tolist() for k,g in conditionRatings.groupby("structureNumber")}
substructureCondition = {k: g["substructure"].tolist() for k,g in conditionRatings.groupby("structureNumber")}
superstructureCondition = {k: g["superstructure"].tolist() for k,g in conditionRatings.groupby("structureNumber")}
deckCondition
def findAllIntervals(lst):
    """Split a chronological list of condition-rating strings into
    deterioration intervals.

    Ratings normally stay flat or decrease over time; a jump UP of more
    than one point between consecutive inspections is interpreted as a
    reconstruction, which closes the current interval and starts a new
    one.  (A rise of exactly one point is kept inside the same interval,
    matching the original behavior.)

    Args:
        lst: list of single-digit rating strings ordered by year.

    Returns:
        (intervals, count) where intervals is a list of lists of int
        ratings and count == len(intervals).  Robustness fix: empty input
        now yields ([], 0) instead of falling through and returning None.
    """
    intervals = []
    current = []
    for pos in range(len(lst)):
        current.append(int(lst[pos]))
        if pos == len(lst) - 1:
            break
        # A rating increase of more than one point marks a reconstruction:
        # close the current interval and start a new one.
        if int(lst[pos + 1]) - int(lst[pos]) > 1:
            intervals.append(current)
            current = []
    if current:
        intervals.append(current)
    return (intervals, len(intervals))
# Split every bridge's deck-rating history into deterioration intervals and
# plot how many intervals (reconstruction cycles) the bridges have.
deckIntervals = []
deckIntervalSize = []
for i in deckCondition.values():
    lst, size = findAllIntervals(i)
    deckIntervalSize.append(size)
    deckIntervals.append(lst)
#
from collections import Counter
keys = list(Counter(deckIntervalSize).keys())
values = list(Counter(deckIntervalSize).values())
plt.figure(figsize=(10,8))
plt.bar(keys,values)
plt.xlabel("No. of Intervals")
plt.ylabel("No. of Records")
plt.title("No. of Records vs No. of Intervals")
plt.show()
## Filter records with two or more intervals.
deckIntervalGt2 = []
# Bug fix: the original `range(0, len(deckIntervalSize) - 1, 1)` silently
# skipped the last record; iterate over every index instead.
for index in range(len(deckIntervalSize)):
    if deckIntervalSize[index] > 1:
        deckIntervalGt2.append(deckIntervals[index])
# Average number of inspections in the first and second intervals.
first_interval = []
second_interval = []
for intervals in deckIntervalGt2:
    first_interval.append(len(intervals[0]))
    second_interval.append(len(intervals[1]))
print("Mean of the first time intervals :", np.mean(first_interval))
print("Mean of the second time intervals :", np.mean(second_interval))
# Average rating at the start of the first and second intervals.
avgStartOfFirstInterval = []
avgStartOfSecondInterval = []
for intervals in deckIntervalGt2:
    avgStartOfFirstInterval.append(intervals[0][0])
    avgStartOfSecondInterval.append(intervals[1][0])
print("Mean of the first condition rating of first time intervals :", np.mean(avgStartOfFirstInterval))
print("Mean of the first condition rating of second time intervals :", np.mean(avgStartOfSecondInterval))
# Same interval analysis as for the deck, now for superstructure ratings.
superstructureIntervals = []
superstructureIntervalSize = []
for i in superstructureCondition.values():
    lst, size = findAllIntervals(i)
    superstructureIntervalSize.append(size)
    superstructureIntervals.append(lst)
#
from collections import Counter
keys_superstructure = list(Counter(superstructureIntervalSize).keys())
values_superstructure = list(Counter(superstructureIntervalSize).values())
plt.figure(figsize=(10,8))
plt.bar(keys_superstructure,values_superstructure)
plt.xlabel("No. of Intervals")
plt.ylabel("No. of Records")
plt.xticks([1,2,3,4,5])
plt.title("No. of Records vs No. of Intervals")
plt.show()
## Filter records with two or more intervals.
superstructureIntervalGt2 = []
# Bug fix: `range(0, len(...) - 1, 1)` skipped the last record.
for index in range(len(superstructureIntervalSize)):
    if superstructureIntervalSize[index] > 1:
        superstructureIntervalGt2.append(superstructureIntervals[index])
superstructureFirstInterval = []
superstructureSecondInterval = []
for intervals in superstructureIntervalGt2:
    superstructureFirstInterval.append(len(intervals[0]))
    superstructureSecondInterval.append(len(intervals[1]))
print("Mean of the first time intervals :", np.mean(superstructureFirstInterval))
print("Mean of the second time intervals :", np.mean(superstructureSecondInterval))
avgStartOfFirstInterval_sp = []
avgStartOfSecondInterval_sp = []
for intervals in superstructureIntervalGt2:
    avgStartOfFirstInterval_sp.append(intervals[0][0])
    avgStartOfSecondInterval_sp.append(intervals[1][0])
print("Mean of the first condition rating of first time intervals :", np.mean(avgStartOfFirstInterval_sp))
print("Mean of the first condition rating of second time intervals :", np.mean(avgStartOfSecondInterval_sp))
# Same interval analysis for substructure ratings.
substructureIntervals = []
substructureIntervalSize = []
for i in substructureCondition.values():
    lst, size = findAllIntervals(i)
    substructureIntervalSize.append(size)
    substructureIntervals.append(lst)
#
from collections import Counter
keys_substructure = list(Counter(substructureIntervalSize).keys())
values_substructure = list(Counter(substructureIntervalSize).values())
plt.figure(figsize=(10,8))
plt.bar(keys_substructure,values_substructure)
plt.xlabel("No. of Intervals")
plt.ylabel("No. of Records")
plt.xticks([1,2,3,4,5])
plt.title("No. of Records vs No. of Intervals")
plt.show()
## Filter records with two or more intervals.
substructureIntervalGt2 = []
# Bug fix: `range(0, len(...) - 1, 1)` skipped the last record.
for index in range(len(substructureIntervalSize)):
    if substructureIntervalSize[index] > 1:
        substructureIntervalGt2.append(substructureIntervals[index])
substructureFirstInterval = []
substructureSecondInterval = []
for intervals in substructureIntervalGt2:
    substructureFirstInterval.append(len(intervals[0]))
    substructureSecondInterval.append(len(intervals[1]))
print("Mean of the first time intervals :", np.mean(substructureFirstInterval))
print("Mean of the second time intervals :", np.mean(substructureSecondInterval))
avgStartOfFirstInterval_sb = []
avgStartOfSecondInterval_sb = []
for intervals in substructureIntervalGt2:
    avgStartOfFirstInterval_sb.append(intervals[0][0])
    avgStartOfSecondInterval_sb.append(intervals[1][0])
print("Mean of the first condition rating of first time intervals :", np.mean(avgStartOfFirstInterval_sb))
print("Mean of the first condition rating of second time intervals :", np.mean(avgStartOfSecondInterval_sb))
# Restrict to bridges built in 1992 or later, so each rating history is
# observed from (near) construction onward.
CR1992 = conditionRatings.loc[conditionRatings['yearBuilt'] >=1992]
CR1992.head()
deckCondition_1992 = {k: g["deck"].tolist() for k,g in CR1992.groupby("structureNumber")}
substructureCondition_1992 = {k: g["substructure"].tolist() for k,g in CR1992.groupby("structureNumber")}
superstructureCondition_1992 = {k: g["superstructure"].tolist() for k,g in CR1992.groupby("structureNumber")}
# Interval analysis for deck ratings of post-1992 bridges.
deckIntervals_1992 = []
deckIntervalSize_1992 = []
for i in deckCondition_1992.values():
    lst, size = findAllIntervals(i)
    deckIntervalSize_1992.append(size)
    deckIntervals_1992.append(lst)
#
from collections import Counter
keys_d1992 = list(Counter(deckIntervalSize_1992).keys())
values_d1992 = list(Counter(deckIntervalSize_1992).values())
plt.figure(figsize=(10,8))
plt.bar(keys_d1992,values_d1992)
plt.xlabel("No. of Intervals")
plt.ylabel("No. of Records")
plt.xticks([1,2,3,4,5])
plt.title("No. of Records vs No. of Intervals")
plt.show()
## Filter records with two or more intervals.
deckIntervalGt2_1992 = []
# Bug fix: `range(0, len(...) - 1, 1)` skipped the last record.
for index in range(len(deckIntervalSize_1992)):
    if deckIntervalSize_1992[index] > 1:
        deckIntervalGt2_1992.append(deckIntervals_1992[index])
deckFirstInterval_1992 = []
deckSecondInterval_1992 = []
for intervals in deckIntervalGt2_1992:
    deckFirstInterval_1992.append(len(intervals[0]))
    deckSecondInterval_1992.append(len(intervals[1]))
print("Mean of the first time intervals :", np.mean(deckFirstInterval_1992))
print("Mean of the second time intervals :", np.mean(deckSecondInterval_1992))
print()
avgStartOfFirstInterval_1992 = []
avgStartOfSecondInterval_1992 = []
for intervals in deckIntervalGt2_1992:
    avgStartOfFirstInterval_1992.append(intervals[0][0])
    avgStartOfSecondInterval_1992.append(intervals[1][0])
print("Mean of the first condition rating of first time intervals :", np.mean(avgStartOfFirstInterval_1992))
print("Mean of the first condition rating of second time intervals :", np.mean(avgStartOfSecondInterval_1992))
# Interval analysis for superstructure ratings of post-1992 bridges.
superstructureIntervals_1992 = []
superstructureIntervalSize_1992 = []
for i in superstructureCondition_1992.values():
    lst, size = findAllIntervals(i)
    superstructureIntervalSize_1992.append(size)
    superstructureIntervals_1992.append(lst)
#
from collections import Counter
keys_sp1992 = list(Counter(superstructureIntervalSize_1992).keys())
values_sp1992 = list(Counter(superstructureIntervalSize_1992).values())
plt.figure(figsize=(10,8))
plt.bar(keys_sp1992,values_sp1992)
plt.xlabel("No. of Intervals")
plt.ylabel("No. of Records")
plt.xticks([1,2,3,4,5])
plt.title("No. of Records vs No. of Intervals")
plt.show()
superstructureIntervalGt2_1992 = []
# Bug fix: `range(0, len(...) - 1, 1)` skipped the last record.
for index in range(len(superstructureIntervalSize_1992)):
    if superstructureIntervalSize_1992[index] > 1:
        superstructureIntervalGt2_1992.append(superstructureIntervals_1992[index])
superstructureFirstInterval_1992 = []
superstructureSecondInterval_1992 = []
for intervals in superstructureIntervalGt2_1992:
    superstructureFirstInterval_1992.append(len(intervals[0]))
    superstructureSecondInterval_1992.append(len(intervals[1]))
print("Mean of the first time intervals :", np.mean(superstructureFirstInterval_1992))
print("Mean of the second time intervals :", np.mean(superstructureSecondInterval_1992))
print()
avgStartOfFirstInterval_sp_1992 = []
avgStartOfSecondInterval_sp_1992 = []
for intervals in superstructureIntervalGt2_1992:
    avgStartOfFirstInterval_sp_1992.append(intervals[0][0])
    avgStartOfSecondInterval_sp_1992.append(intervals[1][0])
print("Mean of the first condition rating of first time intervals :", np.mean(avgStartOfFirstInterval_sp_1992))
print("Mean of the first condition rating of second time intervals :", np.mean(avgStartOfSecondInterval_sp_1992))
# Interval analysis for substructure ratings of post-1992 bridges.
substructureIntervals_1992 = []
substructureIntervalSize_1992 = []
for i in substructureCondition_1992.values():
    lst, size = findAllIntervals(i)
    substructureIntervalSize_1992.append(size)
    substructureIntervals_1992.append(lst)
#
from collections import Counter
keys_sb1992 = list(Counter(substructureIntervalSize_1992).keys())
values_sb1992 = list(Counter(substructureIntervalSize_1992).values())
plt.figure(figsize=(10,8))
plt.bar(keys_sb1992,values_sb1992)
plt.xlabel("No. of Intervals")
plt.ylabel("No. of Records")
plt.xticks([1,2,3,4,5])
plt.title("No. of Records vs No. of Intervals")
plt.show()
substructureIntervalGt2_1992 = []
# Bug fix: `range(0, len(...) - 1, 1)` skipped the last record.
for index in range(len(substructureIntervalSize_1992)):
    if substructureIntervalSize_1992[index] > 1:
        substructureIntervalGt2_1992.append(substructureIntervals_1992[index])
substructureFirstInterval_1992 = []
substructureSecondInterval_1992 = []
for intervals in substructureIntervalGt2_1992:
    substructureFirstInterval_1992.append(len(intervals[0]))
    substructureSecondInterval_1992.append(len(intervals[1]))
print("Mean of the first time intervals :", np.mean(substructureFirstInterval_1992))
print("Mean of the second time intervals :", np.mean(substructureSecondInterval_1992))
print()
avgStartOfFirstInterval_sb_1992 = []
avgStartOfSecondInterval_sb_1992 = []
for intervals in substructureIntervalGt2_1992:
    avgStartOfFirstInterval_sb_1992.append(intervals[0][0])
    avgStartOfSecondInterval_sb_1992.append(intervals[1][0])
print("Mean of the first condition rating of first time intervals :", np.mean(avgStartOfFirstInterval_sb_1992))
print("Mean of the first condition rating of second time intervals :", np.mean(avgStartOfSecondInterval_sb_1992))
## Consider creating a deterioration model and curves
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Extraction of Data from MongoDB and Creating DataFrame
Step2: First five rows from the dataframe
Step3: Data filterxation
Step4: Data Exploration
Step5: Number of bridges with valid year recorded for year reconstructed.
Step6: 2. Deck, Substructure, and Supertructure
Step7: we have a set of three dictionary for substructure, superstructure and deck of all the bridges from 1992 - 2016.
Step8: Dividing the time interval of deterioration on basis of suspected intervention. if the deterioration of condition rating is abruptly interfered by sudden increase in condition rating, we will consider a possible case of intervention (Repair, Reconstruction and Repair) and hence the time interval will be divided at the point of intervention, which is considered as splitting point, and second interval will be calculated from next point.
Step9: Calculating the mean time interval for first and second time interval for Deck
Step10: As opposed to the hypothesis the mean time for second interval is greater than first time interval for deck. but not all intervals start from rating 9 and we might not consider that some of the bridges might have be reconstructed before 1992
Step11: Calculating the mean time interval for first and second time interval for Superstructure
Step12: As opposed to the hypothesis the mean time for second interval is greater than first time interval for superstructure.
Step13: Calculating the mean time interval for first and second time interval for Substructure
Step14: Consideration
Step15: Superstructure built after 1992
Step16: Superstructure built after 1992
Step17: Substructure built after 1992
|
7,883
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
! sudo apt install curl
! curl https://bazel.build/bazel-release.pub.gpg | sudo apt-key add -
! echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list
! sudo apt update && sudo apt install bazel
! git clone https://github.com/google-coral/tflite.git
! sudo apt-get install -y crossbuild-essential-armhf crossbuild-essential-arm64 bazel
%cd tflite/cpp/examples/lstpu
! make CPU=aarch64
from google.colab import files
files.download('bazel-out/aarch64-opt/bin/lstpu')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build the C++ "lstpu" example for Coral
Step2: Download example from GitHub
Step3: Build lstpu example for Dev Board
Step4: The following line builds for an ARM64 system (aarch64). Alternative CPU architectures are k8 and armv7a.
Step5: Download the binary
|
7,884
|
<ASSISTANT_TASK:>
Python Code:
from learning import *
from probabilistic_learning import *
from notebook import *
%psource DataSet
iris = DataSet(name="iris")
print(iris.examples[0])
print(iris.inputs)
iris2 = DataSet(name="iris",exclude=[1])
print(iris2.inputs)
print(iris.examples[:3])
print("attrs:", iris.attrs)
print("attrnames (by default same as attrs):", iris.attrnames)
print("target:", iris.target)
print("inputs:", iris.inputs)
print(iris.values[0])
print("name:", iris.name)
print("source:", iris.source)
print(iris.values[iris.target])
print("Sanitized:",iris.sanitize(iris.examples[0]))
print("Original:",iris.examples[0])
iris2 = DataSet(name="iris")
iris2.remove_examples("virginica")
print(iris2.values[iris2.target])
print("Class of first example:",iris2.examples[0][iris2.target])
iris2.classes_to_numbers()
print("Class of first example:",iris2.examples[0][iris2.target])
means, deviations = iris.find_means_and_deviations()
print("Setosa feature means:", means["setosa"])
print("Versicolor mean for first feature:", means["versicolor"][0])
print("Setosa feature deviations:", deviations["setosa"])
print("Virginica deviation for second feature:",deviations["virginica"][1])
iris = DataSet(name="iris")
show_iris()
show_iris(0, 1, 3)
show_iris(1, 2, 3)
def manhattan_distance(X, Y):
return sum([abs(x - y) for x, y in zip(X, Y)])
distance = manhattan_distance([1,2], [3,4])
print("Manhattan Distance between (1,2) and (3,4) is", distance)
def euclidean_distance(X, Y):
return math.sqrt(sum([(x - y)**2 for x, y in zip(X,Y)]))
distance = euclidean_distance([1,2], [3,4])
print("Euclidean Distance between (1,2) and (3,4) is", distance)
def hamming_distance(X, Y):
return sum(x != y for x, y in zip(X, Y))
distance = hamming_distance(['a','b','c'], ['a','b','b'])
print("Hamming Distance between 'abc' and 'abb' is", distance)
def mean_boolean_error(X, Y):
return mean(int(x != y) for x, y in zip(X, Y))
distance = mean_boolean_error([1,2,3], [1,4,5])
print("Mean Boolean Error Distance between (1,2,3) and (1,4,5) is", distance)
def mean_error(X, Y):
return mean([abs(x - y) for x, y in zip(X, Y)])
distance = mean_error([1,0,5], [3,10,5])
print("Mean Error Distance between (1,0,5) and (3,10,5) is", distance)
def ms_error(X, Y):
return mean([(x - y)**2 for x, y in zip(X, Y)])
distance = ms_error([1,0,5], [3,10,5])
print("Mean Square Distance between (1,0,5) and (3,10,5) is", distance)
def rms_error(X, Y):
return math.sqrt(ms_error(X, Y))
distance = rms_error([1,0,5], [3,10,5])
print("Root of Mean Error Distance between (1,0,5) and (3,10,5) is", distance)
psource(PluralityLearner)
zoo = DataSet(name="zoo")
pL = PluralityLearner(zoo)
print(pL([1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 4, 1, 0, 1]))
psource(NearestNeighborLearner)
iris = DataSet(name="iris")
kNN = NearestNeighborLearner(iris,k=3)
print(kNN([5.1,3.0,1.1,0.1]))
pseudocode("Decision Tree Learning")
psource(DecisionFork)
psource(DecisionLeaf)
psource(DecisionTreeLearner)
iris = DataSet(name="iris")
DTL = DecisionTreeLearner(iris)
print(DTL([5.1, 3.0, 1.1, 0.1]))
psource(RandomForest)
iris = DataSet(name="iris")
DTL = RandomForest(iris)
print(DTL([5.1, 3.0, 1.1, 0.1]))
dataset = iris
target_vals = dataset.values[dataset.target]
target_dist = CountingProbDist(target_vals)
attr_dists = {(gv, attr): CountingProbDist(dataset.values[attr])
for gv in target_vals
for attr in dataset.inputs}
for example in dataset.examples:
targetval = example[dataset.target]
target_dist.add(targetval)
for attr in dataset.inputs:
attr_dists[targetval, attr].add(example[attr])
print(target_dist['setosa'])
print(attr_dists['setosa', 0][5.0])
def predict(example):
def class_probability(targetval):
return (target_dist[targetval] *
product(attr_dists[targetval, attr][example[attr]]
for attr in dataset.inputs))
return argmax(target_vals, key=class_probability)
print(predict([5, 3, 1, 0.1]))
psource(NaiveBayesDiscrete)
means, deviations = dataset.find_means_and_deviations()
target_vals = dataset.values[dataset.target]
target_dist = CountingProbDist(target_vals)
print(means["setosa"])
print(deviations["versicolor"])
def predict(example):
def class_probability(targetval):
prob = target_dist[targetval]
for attr in dataset.inputs:
prob *= gaussian(means[targetval][attr], deviations[targetval][attr], example[attr])
return prob
return argmax(target_vals, key=class_probability)
print(predict([5, 3, 1, 0.1]))
psource(NaiveBayesContinuous)
psource(NaiveBayesSimple)
nBD = NaiveBayesLearner(iris, continuous=False)
print("Discrete Classifier")
print(nBD([5, 3, 1, 0.1]))
print(nBD([6, 5, 3, 1.5]))
print(nBD([7, 3, 6.5, 2]))
nBC = NaiveBayesLearner(iris, continuous=True)
print("\nContinuous Classifier")
print(nBC([5, 3, 1, 0.1]))
print(nBC([6, 5, 3, 1.5]))
print(nBC([7, 3, 6.5, 2]))
bag1 = 'a'*50 + 'b'*30 + 'c'*15
dist1 = CountingProbDist(bag1)
bag2 = 'a'*30 + 'b'*45 + 'c'*20
dist2 = CountingProbDist(bag2)
bag3 = 'a'*20 + 'b'*20 + 'c'*35
dist3 = CountingProbDist(bag3)
dist = {('First', 0.5): dist1, ('Second', 0.3): dist2, ('Third', 0.2): dist3}
nBS = NaiveBayesLearner(dist, simple=True)
print(nBS('aab')) # We can handle strings
print(nBS(['b', 'b'])) # And lists!
print(nBS('ccbcc'))
psource(PerceptronLearner)
iris = DataSet(name="iris")
iris.classes_to_numbers()
perceptron = PerceptronLearner(iris)
print(perceptron([5, 3, 1, 0.1]))
psource(LinearLearner)
iris = DataSet(name="iris")
iris.classes_to_numbers()
linear_learner = LinearLearner(iris)
print(linear_learner([5, 3, 1, 0.1]))
psource(EnsembleLearner)
iris = DataSet(name="iris")
nBD = NaiveBayesLearner(iris, continuous=False)
print("Error ratio for Discrete:", err_ratio(nBD, iris))
nBC = NaiveBayesLearner(iris, continuous=True)
print("Error ratio for Continuous:", err_ratio(nBC, iris))
kNN_1 = NearestNeighborLearner(iris, k=1)
kNN_3 = NearestNeighborLearner(iris, k=3)
kNN_5 = NearestNeighborLearner(iris, k=5)
kNN_7 = NearestNeighborLearner(iris, k=7)
print("Error ratio for k=1:", err_ratio(kNN_1, iris))
print("Error ratio for k=3:", err_ratio(kNN_3, iris))
print("Error ratio for k=5:", err_ratio(kNN_5, iris))
print("Error ratio for k=7:", err_ratio(kNN_7, iris))
iris2 = DataSet(name="iris")
iris2.classes_to_numbers()
perceptron = PerceptronLearner(iris2)
print("Error ratio for Perceptron:", err_ratio(perceptron, iris2))
psource(AdaBoost)
psource(WeightedLearner)
WeightedPerceptron = WeightedLearner(PerceptronLearner)
AdaboostLearner = AdaBoost(WeightedPerceptron, 5)
iris2 = DataSet(name="iris")
iris2.classes_to_numbers()
adaboost = AdaboostLearner(iris2)
adaboost([5, 3, 1, 0.1])
print("Error ratio for adaboost: ", err_ratio(adaboost, iris2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CONTENTS
Step2: Class Attributes
Step3: To check that we imported the correct dataset, we can do the following
Step4: Which correctly prints the first line in the csv file and the list of attribute indexes.
Step5: Attributes
Step6: Then we will print attrs, attrnames, target, input. Notice how attrs holds values in [0,4], but since the fourth attribute is the target, inputs holds values in [0,3].
Step7: Now we will print all the possible values for the first feature/attribute.
Step8: Finally we will print the dataset's name and source. Keep in mind that we have not set a source for the dataset, so in this case it is empty.
Step9: A useful combination of the above is dataset.values[dataset.target] which returns the possible values of the target. For classification problems, this will return all the possible classes. Let's try it
Step10: Helper Functions
Step11: Currently the iris dataset has three classes, setosa, virginica and versicolor. We want though to convert it to a binary class dataset (a dataset with two classes). The class we want to remove is "virginica". To accomplish that we will utilize the helper function remove_examples.
Step12: We also have classes_to_numbers. For a lot of the classifiers in the module (like the Neural Network), classes should have numerical values. With this function we map string class names to numbers.
Step13: As you can see "setosa" was mapped to 0.
Step14: IRIS VISUALIZATION
Step15: You can play around with the values to get a good look at the dataset.
Step16: Euclidean Distance (euclidean_distance)
Step17: Hamming Distance (hamming_distance)
Step18: Mean Boolean Error (mean_boolean_error)
Step19: Mean Error (mean_error)
Step20: Mean Square Error (ms_error)
Step21: Root of Mean Square Error (rms_error)
Step22: PLURALITY LEARNER CLASSIFIER
Step23: It takes as input a dataset and returns a function. We can later call this function with the item we want to classify as the argument and it returns the class it should be classified in.
Step24: The output for the above code is "mammal", since that is the most popular and common class in the dataset.
Step25: It takes as input a dataset and k (default value is 1) and it returns a function, which we can later use to classify a new item.
Step26: The output of the above code is "setosa", which means the flower with the above measurements is of the "setosa" species.
Step27: Implementation
Step28: DecisionFork holds the attribute, which is tested at that node, and a dict of branches. The branches store the child nodes, one for each of the attribute's values. Calling an object of this class as a function with input tuple as an argument returns the next node in the classification path based on the result of the attribute test.
Step29: The leaf node stores the class label in result. All input tuples' classification paths end on a DecisionLeaf whose result attribute decide their class.
Step30: The implementation of DecisionTreeLearner provided in learning.py uses information gain as the metric for selecting which attribute to test for splitting. The function builds the tree top-down in a recursive manner. Based on the input it makes one of the four choices
Step31: As expected, the Decision Tree learner classifies the sample as "setosa" as seen in the previous section.
Step32: This algorithm creates an ensemble of decision trees using bagging and feature bagging. It takes 'm' examples randomly from the total number of examples and then perform feature bagging with probability p to retain an attribute. All the predictors are predicted from the DecisionTreeLearner and then a final prediction is made.
Step33: As expected, the Random Forest classifies the sample as "setosa".
Step34: First we found the different values for the classes (called targets here) and calculated their distribution. Next we initialized a dictionary of CountingProbDist objects, one for each class and feature. Finally, we iterated through the examples in the dataset and calculated the needed probabilites.
Step35: You can view the complete code by executing the next line
Step36: Continuous
Step37: You can see the means of the features for the "Setosa" class and the deviations for "Versicolor".
Step38: The complete code of the continuous algorithm
Step39: Simple
Step40: This classifier is useful when you already have calculated the distributions and you need to predict future items.
Step41: Notice how the Discrete Classifier misclassified the second item, while the Continuous one had no problem.
Step42: Now that we have the CountingProbDist objects for each bag/class, we will create the dictionary. We assume that it is equally probable that we will pick from any bag.
Step43: Now we can start making predictions
Step44: The results make intuitive sence. The first bag has a high amount of 'a's, the second has a high amount of 'b's and the third has a high amount of 'c's. The classifier seems to confirm this intuition.
Step45: Note that the Perceptron is a one-layer neural network, without any hidden layers. So, in BackPropagationLearner, we will pass no hidden layers. From that function we get our network, which is just one layer, with the weights calculated.
Step46: The correct output is 0, which means the item belongs in the first class, "setosa". Note that the Perceptron algorithm is not perfect and may produce false classifications.
Step47: This algorithm first assigns some random weights to the input variables and then based on the error calculated updates the weight for each variable. Finally the prediction is made with the updated weights.
Step48: ENSEMBLE LEARNER
Step49: This algorithm takes input as a list of learning algorithms, have them vote and then finally returns the predicted result.
Step50: Naive Bayes
Step51: The error for the Naive Bayes algorithm is very, very low; close to 0. There is also very little difference between the discrete and continuous version of the algorithm.
Step52: Notice how the error became larger and larger as k increased. This is generally the case with datasets where classes are spaced out, as is the case with the iris dataset. If items from different classes were closer together, classification would be more difficult. Usually a value of 1, 3 or 5 for k suffices.
Step53: The Perceptron didn't fare very well mainly because the dataset is not linearly separated. On simpler datasets the algorithm performs much better, but unfortunately such datasets are rare in real life scenarios.
Step54: AdaBoost takes as inputs
Step55: The WeightedLearner function will then call the PerceptronLearner, during each iteration, with the modified dataset which contains the examples according to the weights associated with them.
Step56: That is the correct answer. Let's check the error rate of adaboost with perceptron.
|
7,885
|
<ASSISTANT_TASK:>
Python Code:
set1={"pop", "rock", "soul", "hard rock", "rock", "R&B", "rock", "disco"}
set1
album_list =[ "Michael Jackson", "Thriller", 1982, "00:42:19", \
"Pop, Rock, R&B", 46.0, 65, "30-Nov-82", None, 10.0]
album_set = set(album_list)
album_set
music_genres = set(["pop", "pop", "rock", "folk rock", "hard rock", "soul", \
"progressive rock", "soft rock", "R&B", "disco"])
music_genres
set(['rap','house','electronic music', 'rap'])
A=[1,2,2,1]
B=set([1,2,2,1])
print(sum(A)==sum(B))
print(sum(A))
print(sum(B))
A = set(["Thriller","Back in Black", "AC/DC"] )
A
A.add("NSYNC")
A
A.add("NSYNC")
A
A.remove("NSYNC")
A
"AC/DC" in A
album_set1 = set(["Thriller",'AC/DC', 'Back in Black'] )
album_set2 = set([ "AC/DC","Back in Black", "The Dark Side of the Moon"] )
album_set1, album_set2
album_set_3=album_set1 & album_set2
album_set_3
album_set1.difference(album_set2)
album_set2.difference(album_set1)
album_set1.intersection(album_set2)
album_set1.union(album_set2)
set(album_set1).issuperset(album_set2)
set(album_set2).issubset(album_set1)
set({"Back in Black", "AC/DC"}).issubset(album_set1)
album_set1.issuperset({"Back in Black", "AC/DC"})
album_set3 = album_set1.union(album_set2)
album_set3
album_set1.issubset(album_set3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The process of mapping is illustrated in the figure
Step2: Now let us create a set of genres
Step3: Convert the following list to a set ['rap','house','electronic music', 'rap']
Step4: <div align="right">
Step5: <div align="right">
Step6: We can add an element to a set using the add() method
Step7: If we add the same element twice, nothing will happen as there can be no duplicates in a set
Step8: We can remove an item from a set using the remove method
Step9: We can verify if an element is in the set using the in command
Step10: Working with sets
Step11: <a ><img src = "https
Step12: As both sets contain 'AC/DC' and 'Back in Black' we represent these common elements with the intersection of two circles.
Step13: We can find all the elements that are only contained in album_set1 using the difference method
Step14: We only consider elements in album_set1; all the elements in album_set2, including the intersection, are not included.
Step15: <a ><img src = "https
Step16: This corresponds to the intersection of the two circles
Step17: And you can check if a set is a superset or subset of another set, respectively, like this
Step18: Here is an example where issubset() is issuperset() is true
Step19: Create a new set “album_set3” that is the union of “album_set1” and “album_set2”
Step20: <div align="right">
|
7,886
|
<ASSISTANT_TASK:>
Python Code:
import sys
print(sys.version)
import numpy as np
import pandas as pd
# RMS Titanic data visualization code
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
def accuracy_score(truth, pred):
Returns accuracy score for input truth and predictions.
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(outcomes[:5], predictions)
def predictions_0(data):
Model with no features. Always predicts a passenger did not survive.
predictions = []
for _, passenger in data.iterrows():
# Predict the survival of 'passenger'
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Sex')
int(data[:1]['Sex'] == "female")
def predictions_1(data):
Model with one feature:
- Predict a passenger survived if they are female.
predictions = []
for _, passenger in data.iterrows():
# Simple way of returning 1 if female, 0 if male
if(passenger['Sex']=="female"):
z = 1
else:
z = 0
predictions.append(z)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
def predictions_2(data):
Model with two features:
- Predict a passenger survived if they are female.
- Predict a passenger survived if they are male and younger than 10.
predictions = []
for _, passenger in data.iterrows():
if(passenger['Sex'] == "female"):
z = 1
elif(passenger['Sex'] == "male" and passenger["Age"] < 10):
z = 1
else:
z = 0
predictions.append(z)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Age < 18"])
# Note: exploration was done in R, as it lends itself better to
# data analysis. After building a decision tree, we will implement the logic
# from it in Python
def predictions_3(data):
Model with multiple features. Makes a prediction with an accuracy of at least 80%.
predictions = []
for _, passenger in data.iterrows():
if passenger['Sex'] == "male":
if passenger['Age'] >= 6.5:
z = 0
if passenger['Age'] < 6.5:
if passenger['SibSp'] >= 2.5:
z = 0
else: z = 1
else:
z = 0
if passenger['Sex'] == 'female':
if passenger['Pclass'] >= 2.5:
if passenger['Fare'] >= 23.35:
z = 0
if passenger['Fare'] < 23.35:
if passenger['Embarked'] == "S":
z = 0
if passenger['Embarked'] in ["C", "Q"]:
z = 1
if passenger['Pclass'] < 2.5:
z = 1
predictions.append(z)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
print accuracy_score(outcomes, predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship
Step3: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcome[i].
Step5: Tip
Step6: Question 1
Step7: Answer
Step9: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction
Step10: Question 2
Step11: Answer
Step13: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction
Step14: Question 3
Step15: Answer
Step17: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
|
7,887
|
<ASSISTANT_TASK:>
Python Code:
# Problem 3.4, page 107 Anderson, Woessner and Hunt (2015)
# import Python libraries/functionality for use in this notebook
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import scipy.special
import sys, os
from mpl_toolkits.axes_grid1 import make_axes_locatable
# return current working directory
os.getcwd()
# Set the name of the path to the model working directory
dirname = "P3-4_Theis"
datapath = os.getcwd()
modelpath = os.path.join(datapath, dirname)
print 'Name of model path: ', modelpath
# Now let's check if this directory exists. If not, then we will create it.
if os.path.exists(modelpath):
print 'Model working directory already exists.'
else:
print 'Creating model working directory.'
os.mkdir(modelpath)
os.chdir(modelpath)
os.getcwd()
#Define an function, class, and object for Theis Well analysis
def well_function(u):
return scipy.special.exp1(u)
def theis(Q, T, S, r, t):
u = r ** 2 * S / 4. / T / t
s = Q / 4. / np.pi / T * well_function(u)
return s
class Well(object):
def __init__(self, x, y, rate, name):
self.x = float(x)
self.y = float(y)
self.rate = rate
self.name = name
self.swell = None
return
# Parameters needed to solve Theis
r = 500 # m
T = 2000 # m^2/d (100 m/d Kh x 20 m thick)
S = 0.00002 # unitless
t = 30. # days
#Q = pumping rate # m^3/d - but we'll enter it below in the well info
# Well information
well_list =[]
well_obj = Well(250, 250, 5760, "Well A") # 4000 L/min = 5760 m^3/d
well_list.append(well_obj)
well_list.append(Well(550, 250, 17280, "Well B")) # 12000 L/min = 17280 m^3/d
# Grid information as requested in problem
x = np.linspace(0, 800., 50) # x-direction 0 to 800 m, 50 m increments
y = np.linspace(0, 500., 50) # y-direction 0 to 500 m, 50 m increments
xgrid, ygrid = np.meshgrid(x, y) # make a grid with these coordinates
times = np.linspace(0.,30.,31) # linear interpolation of time from 0 to 30 days, make 30 increments days at 0.5
rdist = 25 # this sets the distance to plot drawdown over time
print times
#let's find the maximum drawdown
drawdown_grid_max = np.zeros(xgrid.shape, dtype=float)
for well_obj in well_list:
r = ((well_obj.x - xgrid)**2 + (well_obj.y - ygrid) ** 2) ** 0.5
s_max = theis(well_obj.rate, T, S, r, times[-1])
drawdown_grid_max += s_max
max_drawdown = np.max(drawdown_grid_max)
print max_drawdown
for cf in os.listdir(os.getcwd()):
if cf.endswith('.png'):
os.remove(cf)
# Note that this section of code is saving figures for animation - not plotting them!
from IPython.display import clear_output
# to make our plots of drawdown over time a one point, we can
# predefine the response as np.nan. That way, when we plot incrementally
# as we calculate through time, only the times for which calculations
# have been made will appear using plt.plot()
for well_obj in well_list:
well_obj.swell = np.ones_like(times)*np.nan
# using "enumerate" we get both the iterant (t) and a counter (i)
for i,t in enumerate(times):
# the following stuff just writes out a status message to the screen
clear_output()
perc_done = (i/float(len(times)-1)) * 100
sys.stdout.write('working on time {0}: {1:2.2f}% complete'.format(t,
perc_done))
if i < len(times):
sys.stdout.flush()
# here's the end of the silly shenanigans of plotting out status to the screen
# now we calculate the drawdown for each time.
drawdown_grid = np.zeros(xgrid.shape, dtype=float)
for well_obj in well_list:
r = ((well_obj.x - xgrid)**2 + (well_obj.y - ygrid) ** 2) ** 0.5
s = theis(well_obj.rate, T, S, r, t)
well_obj.swell[i] = (theis(well_obj.rate, T, S, rdist, t))
drawdown_grid += s
# drawdown contour map (map view)
plt.subplot(1, 3, 1, aspect='equal')
im = plt.contourf(xgrid,
ygrid,
drawdown_grid,
np.linspace(0,max_drawdown,10))
# optional color bar configuration
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(im, cax=cax).ax.invert_yaxis()
for well_obj in well_list:
plt.text(well_obj.x, well_obj.y, well_obj.name)
plt.title('Drawdown at time = {0:.0f}'.format(t))
# Let's finish with a drawdown only plot --> make a second set of figures with only the
# make a plot
plt.subplot(1, 1, 1, aspect='equal')
im = plt.contourf(xgrid,
ygrid,
drawdown_grid,
np.linspace(0,max_drawdown,10))
plt.colorbar().ax.invert_yaxis()
for well_obj in well_list:
plt.text(well_obj.x, well_obj.y, well_obj.name)
plt.title('Drawdown at time = {0:.0f}'.format(t))
plt.savefig('s_only{0}.png'.format(i))
# for execution robustness, we need to determine where ffmpeg lives
# in general, you probably won't need to bother
import platform
from subprocess import check_output
if 'Windows' in platform.platform():
if '64bit' in platform.architecture()[0]:
ffmpeg_path = os.path.join(binpath, 'ffmpeg.exe')
else:
ffmpeg_path = os.path.join(binpath, 'win32', 'ffmpeg.exe')
else:
#Assume it is in path on macos
ffmpeg_path = 'ffmpeg'
print 'ffmpeg_path is: ', ffmpeg_path
figfiles = ['s_only%d.png']
anmfiles = ['Theis_movie1.mp4']
# note the tricky way we can iterate over the elements of
# two lists in pairs using zip (if you wanted to add more plots)
for figfile,anmfile in zip(figfiles,anmfiles):
try:
os.remove(anmfile)
print 'Deleted the existing animation: ', anmfile
except:
pass
# now we do a system call, making the movie using command line arguments
# for ffmpeg
output = check_output([ffmpeg_path,
'-f', 'image2',
'-i', figfile,
'-vcodec', 'libx264',
'-pix_fmt', 'yuv420p',
anmfile])
from IPython.display import HTML
from base64 import b64encode
video = open(anmfiles[0], "rb").read()
video_encoded = b64encode(video)
video_tag = '<video controls alt="test" src="data:video/x-m4v;base64,{0}">'.format(video_encoded)
HTML(data=video_tag)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We want to explore drawdown as a function of time
Step2: We will want to normalize our plots
Step3: Clobber the PNG output files
Step4: Loop over time and make figures
Step5: Let's make an animation!
Step6: Finally, we can embed the drawdown movie into the notebook
|
7,888
|
<ASSISTANT_TASK:>
Python Code:
###########################################################################
#
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This solution, including any related sample code or data, is made available
# on an “as is,” “as available,” and “with all faults” basis, solely for
# illustrative purposes, and without warranty or representation of any kind.
# This solution is experimental, unsupported and provided solely for your
# convenience. Your use of it is subject to your agreements with Google, as
# applicable, and may constitute a beta feature as defined under those
# agreements. To the extent that you make any data available to Google in
# connection with your use of the solution, you represent and warrant that you
# have all necessary and appropriate rights, consents and permissions to permit
# Google to use and process that data. By using any portion of this solution,
# you acknowledge, assume and accept all risks, known and unknown, associated
# with its usage, including with respect to your deployment of any portion of
# this solution in your systems, or usage in connection with your business,
# if at all.
###########################################################################
################################################################################
######################### CHANGE BQ PROJECT NAME BELOW #########################
################################################################################
project_name = '' #add proj name and dataset
# Google credentials authentication libraries
from google.colab import auth
auth.authenticate_user()
# data processing libraries
import numpy as np
from numpy.core.numeric import NaN
import pandas as pd
import pandas_gbq
import datetime
# modeling and metrics
from scipy.optimize import least_squares
from statsmodels.tools.tools import add_constant
import statsmodels.api as sm
from sklearn.model_selection import train_test_split, cross_val_score, LeaveOneOut, KFold, LeavePOut
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from statsmodels.stats.stattools import durbin_watson
# Visualization
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
# Calculating Relative Importance
!pip install relativeImp
from relativeImp import relativeImp
# BigQuery Magics
'''
BigQuery magics are used to run BigQuery SQL queries in a python environment.
These queries can also be run in the BigQuery UI
'''
from google.cloud import bigquery
from google.cloud.bigquery import magics
magics.context.project = project_name #update your project name
client = bigquery.Client(project=magics.context.project)
%load_ext google.cloud.bigquery
bigquery.USE_LEGACY_SQL = False
################################################################################
######################### CHANGE BQ PROJECT NAME BELOW #########################
################################################################################
%%bigquery df
SELECT *
FROM `.RBA_demo.cleaned_data`; #update with project name.
df.head()
KPI_COL = "y1"
y = df[KPI_COL]
X = df[df.columns[df.columns != KPI_COL]].values
reg = Ridge().fit(X,y)
#reg = Lasso().fit(X,y)
#reg = ElasticNet().fit(X,y)
#reg = LinearRegression().fit(X,y)
reg.intercept_
coefficients = reg.coef_.tolist()
coefficients
#R-squared
reg.score(X,y)
# Generate predictions to calculate MAE, MSE, RMSE
Y_prediction = reg.predict(X)
mean_absolute_error(y,Y_prediction)
mean_squared_error(y,Y_prediction)
rmse = np.sqrt(mean_squared_error(y,Y_prediction))
rmse
yName = 'y1'
xNames = df[df.columns[df.columns != KPI_COL]].columns.to_list()
df_results = relativeImp(df, outcomeName = yName, driverNames = xNames)
df_results
residuals = Y_prediction - y
'''
Visually inspect linearity between target variable (y1) and predictions
'''
plt.plot(Y_prediction,y,'o',alpha=0.5)
plt.show()
'''
Visually inspect the residuals to confirm normality
'''
fig = sm.qqplot(residuals)
sns.kdeplot(residuals, label = '', shade = True)
plt.xlabel('Model Residuals'); plt.ylabel('Density'); plt.title('Distribution of Residuals');
'''
Visually inspect residuals to confirm constant variance
'''
plt.plot(residuals,'o',alpha=0.5)
plt.show()
'''
The Durbin Watson test is a statistical test for detecting autocorrelation of the
model residuals
'''
dw = durbin_watson(residuals)
print('Durbin-Watson',dw)
if dw < 1.5:
print('Positive autocorrelation', '\n')
elif dw > 2.5:
print('Negative autocorrelation', '\n')
else:
print('Little to no autocorrelation', '\n')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 0) Dependencies
Step2: 1) Import dataset
Step3: 1.1) Define KPI column and feature set
Step4: 2) Build RBA Model
Step5: 2.1) Print the model coefficient results
Step6: 2.2) Print the model evaluation metrics
Step7: 3) Calculate the contribution of each digital channel
Step8: 4) Validate Linear Regression Model Assumptions
Step9: 4.2) Linearity
Step10: 4.3) Normality of Errors
Step11: 4.4) Absence of Multicollinearity
Step12: 4.6) Residual Autocorrelation Check
|
7,889
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
%matplotlib inline
matplotlib.style.use('seaborn')
import pandas as pd
from animerec.data import get_data
users, anime = get_data()
from sklearn.model_selection import train_test_split
train, test = train_test_split(users, test_size = 0.1) #let's split up the dataset into a train and test set.
train, valid = train_test_split(train, test_size = 0.2) #let's split up the dataset into a train and valid set.
from animerec.data import remove_users
train = remove_users(train, 10)
#define validation set
valid_users = valid['user_id']
valid_anime = valid['anime_id']
valid_ratings = valid['rating']
genres = anime.genre.apply(lambda x: str(x).split(","))
genres2 = genres.apply(pd.Series)
all_genres = []
for i in range(len(genres2.columns)):
genres2[i] = genres2[i].str.strip()
all_genres += map(lambda s: str(s).strip(), list(genres2[i].unique()))
all_genres = list(np.unique(all_genres))
genre_map = {}
for i, x in enumerate(all_genres): genre_map.update({x:i})
nGenres = len(all_genres)
indexed = anime.set_index('anime_id')
indexed.index.name = None
ind = indexed.ix[train.anime_id.get_values()]
train_genres = ind.genre.map(lambda x: [genre_map[j.strip()] for j in str(x).split(',')])
ohe_genres = np.zeros((len(train_genres), nGenres))
for i, row in enumerate(train_genres): ohe_genres[i, row] = 1
v_ind = indexed.ix[valid.anime_id.get_values()]
valid_genres = v_ind.genre.map(lambda x: [genre_map[j.strip()] for j in str(x).split(',')])
v_ohe_genres = np.zeros((len(valid_genres), nGenres))
for i, row in enumerate(valid_genres): v_ohe_genres[i, row] = 1
test_ind = indexed.ix[test.anime_id.get_values()]
test_genres = test_ind.genre.map(lambda x: [genre_map[j.strip()] for j in str(x).split(',')])
test_ohe_genres = np.zeros((len(test_genres), nGenres))
for i, row in enumerate(test_genres): test_ohe_genres[i, row] = 1
#initialize some local variables
nUsers = len(train.user_id.unique())
nAnime = len(train.anime_id.unique())
# we'll need some data structures in order to vectorize computations
from collections import defaultdict
user_ids = train.user_id
item_ids = train.anime_id
user_index = defaultdict(lambda: -1) # maps a user_id to the index in the bias term.
item_index = defaultdict(lambda: -1) # maps an anime_id to the index in the bias term.
counter = 0
for user in user_ids:
if user_index[user] == -1:
user_index[user] = counter
counter += 1
counter = 0
for item in item_ids:
if item_index[item] == -1:
item_index[item] = counter
counter += 1
#Terms needed for the latent factors.
k = 3; # hyper-parameter
import tensorflow as tf
y = tf.cast(tf.constant(train['rating'].as_matrix(), shape=[len(train),1]), tf.float32)
def objective(alpha, Bi, Bu, Gi, Gu, Pi, y, lam): #Gi, Gu = gamma_i, gamma_u = latent factors for items, users
'''
Like in the linear model, we need to construct the "full" matrix for each (user, item) pair. However, with the
addition of the latent factor terms, it will waste memory to hold each variable in its own tensor.
Instead, create one intermediary tensor to represent our prediction ("pred") and simply incrementally add to that
each additional variable.
'''
pred = tf.gather(Bi, train.anime_id.map(lambda _id: item_index[_id]).as_matrix()) #Bi_full
pred += tf.gather(Bu, train.user_id.map(lambda _id: user_index[_id]).as_matrix()) #Bu_full
Gi_full = tf.gather(Gi, train.anime_id.map(lambda _id: item_index[_id]).as_matrix()) #latent factors of items
Gu_full = tf.gather(Gu, train.user_id.map(lambda _id: user_index[_id]).as_matrix()) #latent factors of users
Pi_full = tf.matmul(tf.constant(ohe_genres, dtype=tf.float32), Pi)
pred += tf.expand_dims(tf.einsum('ij,ji->i', (Gi_full+Pi_full), tf.transpose(Gu_full)), 1)
pred += tf.tile(alpha, (len(train), 1)) #alpha_full
obj = tf.reduce_sum(abs(pred-y))
# regularization
obj += lam * tf.reduce_sum(Bi**2)
obj += lam * tf.reduce_sum(Bu**2)
obj += lam * tf.reduce_sum(Gi**2)
obj += lam * tf.reduce_sum(Gu**2)
return obj
#initialize alpha, Bi, Bu, Gi, Gu
alpha = tf.Variable(tf.constant([6.9], shape=[1, 1]))
Bi = tf.Variable(tf.constant([0.0]*nAnime, shape=[nAnime, 1]))
Bu = tf.Variable(tf.constant([0.0]*nUsers, shape=[nUsers, 1]))
Gi = tf.Variable(tf.random_normal([nAnime, k], stddev=0.35))
Gu = tf.Variable(tf.random_normal([nUsers, k], stddev=0.35))
Pi = tf.Variable(tf.random_normal([nGenres, k], stddev=0.35))
optimizer = tf.train.AdamOptimizer(0.01)
obj = objective(alpha, Bi, Bu, Gi, Gu, Pi, y, 1)
trainer = optimizer.minimize(obj)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
tLoss = []
vLoss = []
prev = 10e10
for iteration in range(500):
cvalues = sess.run([trainer, obj])
print("objective = " + str(cvalues[1]))
tLoss.append(cvalues[1])
if not iteration % 5:
cAlpha, cBi, cBu, cGi, cGu, cPi, cLoss = sess.run([alpha, Bi, Bu, Gi, Gu, Pi, obj])
indices = valid_users.map(lambda x: user_index[x])
bu = indices.map(lambda x: 0.0 if x == -1 else float(cBu[x]))
gu = indices.map(lambda x: np.zeros(k) if x == -1 else cGu[x])
gu = np.vstack(gu.as_matrix()).astype(np.float)
indices = valid_anime.map(lambda x: item_index[x])
bi = indices.map(lambda x: 0.0 if x == -1 else float(cBi[x]))
gi = indices.map(lambda x: np.zeros(k) if x == -1 else cGi[x])
gi = np.vstack(gi.as_matrix()).astype(np.float)
pi = np.dot(v_ohe_genres, cPi)
g = np.einsum('ij,ji->i', (gi+pi), np.transpose(gu))
preds = bu + bi + g + float(cAlpha)
MAE = 1.0/len(valid) * sum(abs(valid_ratings-preds))
vLoss.append(MAE)
if MAE > prev: break
else: prev = MAE
cAlpha, cBi, cBu, cGi, cGu, cLoss = sess.run([alpha, Bi, Bu, Gi, Gu, obj])
print("\nFinal train loss is ", cLoss)
fig, ax1 = plt.subplots()
plt.title('Linear model performance vs. iterations')
ax1.plot(tLoss, 'b-')
ax1.set_xlabel('Training Iterations')
ax1.set_ylabel('Train Loss')
ax2 = ax1.twinx()
ax2.plot(range(0, len(vLoss)*5, 5), vLoss, 'r.')
ax2.set_ylabel('Validation Classification MAE')
fig.tight_layout()
test_users = test['user_id']
test_anime = test['anime_id']
test_ratings = test['rating']
indices = test_users.map(lambda x: user_index[x])
bu = indices.map(lambda x: 0.0 if x == -1 else float(cBu[x]))
gu = indices.map(lambda x: np.zeros(k) if x == -1 else cGu[x])
gu = np.vstack(gu.as_matrix()).astype(np.float)
indices = test_anime.map(lambda x: item_index[x])
bi = indices.map(lambda x: 0.0 if x == -1 else float(cBi[x]))
gi = indices.map(lambda x: np.zeros(k) if x == -1 else cGi[x])
gi= np.vstack(gi.as_matrix()).astype(np.float)
pi = np.dot(test_ohe_genres, cPi)
g = np.einsum('ij,ji->i', (gi+pi), np.transpose(gu))
preds = bu + bi + g + float(cAlpha)
MAE = 1.0/len(test) * sum(abs(test_ratings-preds))
print ('MAE on test set is: ', float(MAE))
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(cPi)
print(pca.explained_variance_ratio_)
twodim = pca.transform(cPi)
plt.scatter(twodim[:,0], twodim[:,1])
for i, txt in enumerate(all_genres):
plt.annotate(txt, (twodim[i,0],twodim[i,1]))
removed = {'Dementia', 'Kids', 'Cars', 'Shoujo Ai', 'nan', 'Shounen Ai', 'Sports'}
for i, txt in enumerate(all_genres):
if txt in removed: continue
plt.scatter(twodim[i,0], twodim[i,1])
plt.annotate(txt, (twodim[i,0],twodim[i,1]))
sess.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's plot the objective and see how it decreases.
Step2: We can see quite clearly that this model does not overfit, just like the linear model. L2 regularization looks to be more than enough. However, we also can see that after about 150 iterations, validation MAE mostly flatlines, which suggests a place for early stopping.
Step3: The cool part about adding features to our model is that we can plot them in their latent space and compare them to each other. Our item features are simply genres, and so we can get some understanding of what our model has learned by looking at the latent space.
Step4: So we lose out on about 15% of the variance when tossing the 3rd dimension. That stings a little bit, but it'll have to make do for our visualization.
Step5: What we can see from this plot is that most genres are pretty closely clustered. The obviously noteworthy genres are the outliers
Step6: As a proof of concept, I note that "Samurai" and "Historical" are extremely close together (at about [-0.4, -0.25]), and likewise for "Psychological" and "Mystery" (at about 0.2, -0.35). From here, we can note other very similar genres. "Josei" and "Seinen" are those intended for adult women and adult men, and they are not that far apart. In contrast, "Shoujo" and "Shounen" (intended for teenage women and men) are farther apart. Perhaps when you're old enough, quality matters more than superficial gender differences?
|
7,890
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'awi', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
7,891
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import sys
import os
sys.path.insert(0,'..')
import graphmap
from graphmap.graphmap_main import GraphMap
from graphmap.memory_persistence import MemoryPersistence
G = GraphMap(MemoryPersistence())
from graphmap.graph_helpers import NodeLink
seattle_skyline_image_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/2/2f/Space_Needle002.jpg/640px-Space_Needle002.jpg'
mt_tacoma_image_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/a/a2/Mount_Rainier_from_the_Silver_Queen_Peak.jpg/1024px-Mount_Rainier_from_the_Silver_Queen_Peak.jpg'
seattle_node_link = NodeLink('seattle')
mt_tacoma_node_link = NodeLink('tacoma')
G.create_node(root_node_link=seattle_node_link, image_value_link=seattle_skyline_image_url)
G.create_node(root_node_link=mt_tacoma_node_link, image_value_link=mt_tacoma_image_url)
seattle_pil_image_result = G.get_image_at_quad_key(root_node_link=seattle_node_link, resolution=256, quad_key='')
mt_tacoma_pil_image_result = G.get_image_at_quad_key(root_node_link=mt_tacoma_node_link, resolution=256, quad_key='')
import matplotlib.pyplot as plt
plt.imshow(seattle_pil_image_result.value)
plt.figure()
plt.imshow(mt_tacoma_pil_image_result.value)
insert_quad_key = '13'
created_node_link_result = G.connect_child(root_node_link=seattle_node_link,
quad_key=insert_quad_key,
child_node_link=mt_tacoma_node_link,)
print(created_node_link_result)
created_node_link = created_node_link_result.value
new_seattle_image_result = G.get_image_at_quad_key(created_node_link, resolution=256, quad_key='')
new_seattle_image_result
plt.imshow(new_seattle_image_result.value)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First let us import the module and create a GraphMap that persists in memory.
Step2: Let us create two nodes with images of Seattle skyline and Mt. Tacoma from wikimedia.
Step3: Now that we have created the 'seattle' node let's see how it looks
Step4: Let us insert the 'tacoma' node into the 'seattle' node at the top right. The quad key we will use is 13. 1 correpsonds to the top right quadrant, inside that we will insert at bottom right hence 3.
Step5: Let us see how the new_seattle_node looks after the insertion.
|
7,892
|
<ASSISTANT_TASK:>
Python Code:
# Install pinned dependencies (Colab-style shell magics).
!pip install -q -U pip
!pip install -q tensorflow==2.2.0
!pip install -q -U google-auth google-api-python-client google-api-core
import os
import tensorflow as tf
import numpy as np
print(f'Tensorflow version: {tf.__version__}')
# GCP environment settings; the placeholders must be edited before running.
PROJECT_ID = 'yourProject' # Change to your project.
BUCKET = 'yourBucketName' # Change to the bucket you created.
# Location of the BQML-exported item embedding files and model output dir on GCS.
EMBEDDING_FILES_PATH = f'gs://{BUCKET}/bqml/item_embeddings/embeddings-*'
MODEL_OUTPUT_DIR = f'gs://{BUCKET}/bqml/embedding_lookup_model'
!gcloud config set project $PROJECT_ID
# Authenticate only when running inside Colab.
# NOTE(review): the bare `except: pass` silently swallows ALL errors, not just
# the ImportError outside Colab — consider `except ImportError` instead.
try:
    from google.colab import auth
    auth.authenticate_user()
    print("Colab user is authenticated.")
except: pass
# Clear any previous SavedModel export so the new one starts fresh.
if tf.io.gfile.exists(MODEL_OUTPUT_DIR):
    print("Removing {} contents...".format(MODEL_OUTPUT_DIR))
    tf.io.gfile.rmtree(MODEL_OUTPUT_DIR)
# Build the embedding-lookup SavedModel from the embedding files.
from embeddings_lookup import lookup_creator
lookup_creator.export_saved_model(EMBEDDING_FILES_PATH, MODEL_OUTPUT_DIR)
# Inspect the exported signature with the SavedModel CLI.
!saved_model_cli show --dir {MODEL_OUTPUT_DIR} --tag_set serve --signature_def serving_default
# Smoke-test: look up embeddings for item ids (space-separated ids appear to be
# averaged/combined by the model; 'abc123' is an unknown id — confirm expected
# out-of-vocabulary behavior in lookup_creator).
loaded_model = tf.saved_model.load(MODEL_OUTPUT_DIR)
input_items = ['2114406', '2114402 2120788', 'abc123']
output = loaded_model(input_items)
print(f'Embeddings retrieved: {output.shape}')
for idx, embedding in enumerate(output):
    print(f'{input_items[idx]}: {embedding[:5]}')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import libraries
Step2: Configure GCP environment settings
Step3: Authenticate your GCP account
Step4: Create the embedding lookup model
Step5: Create the model and export the SavedModel file
Step6: Inspect the exported SavedModel using the saved_model_cli command line tool
Step7: Test the SavedModel file
|
7,893
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
# Load the full text of Anna Karenina as one string.
with open('anna.txt', 'r') as f:
    text=f.read()
# Build the character vocabulary and encode the text as integer ids.
# NOTE(review): iteration order of a `set` of strings is not stable across
# Python processes (hash randomization), so vocab_to_int differs between runs;
# fine within one session, but saved checkpoints depend on this mapping.
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
# Peek at the raw text and its encoded form (notebook display expressions).
text[:100]
chars[:100]
def split_data(chars, batch_size, num_steps, split_frac=0.9):
    """Split character data into training and validation sets.

    The encoded character array is cut into ``batch_size`` parallel slices
    and stacked into a 2-D matrix, so each row can be fed to the network as
    an independent sequence.  Targets are the inputs shifted one character
    to the right (next-character prediction).

    Arguments
    ---------
    chars: 1-D integer-encoded character array
    batch_size: Number of sequences per batch (rows of the output matrices)
    num_steps: Number of sequence steps to keep in the input and pass to the network
    split_frac: Fraction of batches to keep in the training set

    Returns
    -------
    train_x, train_y, val_x, val_y : 2-D arrays of shape
        ``(batch_size, k * num_steps)``, split column-wise at ``split_frac``
        of the full batches.
    """
    slice_size = batch_size * num_steps
    n_batches = int(len(chars) / slice_size)

    # Drop the last few characters to make only full batches.  Note the
    # shifted target needs one extra trailing character: this assumes
    # len(chars) > n_batches * slice_size, otherwise np.split below raises.
    x = chars[: n_batches * slice_size]
    y = chars[1: n_batches * slice_size + 1]

    # Split the data into batch_size slices, then stack them into a 2D matrix.
    x = np.stack(np.split(x, batch_size))
    y = np.stack(np.split(y, batch_size))

    # Now x and y are arrays with dimensions batch_size x n_batches*num_steps.
    # Keep the first split_frac of the batches for training, rest for validation.
    split_idx = int(n_batches * split_frac)
    train_x, train_y = x[:, :split_idx * num_steps], y[:, :split_idx * num_steps]
    val_x, val_y = x[:, split_idx * num_steps:], y[:, split_idx * num_steps:]
    return train_x, train_y, val_x, val_y
# Sanity check: split into 10 parallel sequences with 200-step batches.
train_x, train_y, val_x, val_y = split_data(chars, 10, 200)
train_x.shape  # expect (10, n_batches * 200 * split_frac-ish columns)
train_x[:,:10]  # peek at the first ten encoded characters of each sequence
def get_batch(arrs, num_steps):
    """Yield successive ``num_steps``-wide column slices from each array in ``arrs``.

    Each yielded item is a list with one slice per input array, so paired
    input/target matrices stay aligned.  Trailing columns that do not fill a
    whole batch are dropped.
    """
    _, total_cols = arrs[0].shape
    usable_cols = (total_cols // num_steps) * num_steps
    for start in range(0, usable_cols, num_steps):
        yield [a[:, start:start + num_steps] for a in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
              learning_rate=0.001, grad_clip=5, sampling=False):
    """Build the character-RNN graph (TF1 graph mode) and return its key nodes.

    num_classes: size of the character vocabulary (one-hot width)
    batch_size, num_steps: training batch geometry; forced to 1x1 when sampling
    lstm_size: hidden units per LSTM layer
    num_layers: number of stacked LSTM layers
    learning_rate: Adam learning rate
    grad_clip: global-norm gradient clipping threshold
    sampling: build a 1x1 graph for one-character-at-a-time generation

    Returns a `Graph` namedtuple exposing the placeholders, states, cost,
    predictions, optimizer op and merged summaries.
    """
    # When sampling we feed one character at a time and carry state manually.
    if sampling == True:
        batch_size, num_steps = 1, 1
    tf.reset_default_graph()
    # Declare placeholders we'll feed into the graph
    with tf.name_scope('inputs'):
        inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
        x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')
    with tf.name_scope('targets'):
        targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
        y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
        # Flatten targets to (batch*steps, num_classes) to match the logits below.
        y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
    # Dropout keep probability: fed as 0.5 for training, 1.0 for validation.
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    # Build the RNN layers
    with tf.name_scope("RNN_cells"):
        lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
        drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
        # NOTE(review): `[drop] * num_layers` puts the SAME cell object in
        # every layer; depending on the TF1 version this shares weights across
        # layers — confirm this matches the intended architecture.
        cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
    with tf.name_scope("RNN_init_state"):
        initial_state = cell.zero_state(batch_size, tf.float32)
    # Run the data through the RNN layers
    with tf.name_scope("RNN_forward"):
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)
    # Final state is fed back in as initial_state for the next batch.
    final_state = state
    # Reshape output so it's a bunch of rows, one row for each cell output
    with tf.name_scope('sequence_reshape'):
        seq_output = tf.concat(outputs, axis=1,name='seq_output')
        output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')
    # Now connect the RNN outputs to a softmax layer and calculate the cost
    with tf.name_scope('logits'):
        softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),
                                name='softmax_w')
        softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')
        logits = tf.matmul(output, softmax_w) + softmax_b
        # Histogram summaries for TensorBoard.
        tf.summary.histogram('softmax_w', softmax_w)
        tf.summary.histogram('softmax_b', softmax_b)
    with tf.name_scope('predictions'):
        preds = tf.nn.softmax(logits, name='predictions')
        tf.summary.histogram('predictions', preds)
    with tf.name_scope('cost'):
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')
        cost = tf.reduce_mean(loss, name='cost')
        tf.summary.scalar('cost', cost)
    # Optimizer for training, using gradient clipping to control exploding gradients
    with tf.name_scope('train'):
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
        train_op = tf.train.AdamOptimizer(learning_rate)
        optimizer = train_op.apply_gradients(zip(grads, tvars))
    merged = tf.summary.merge_all()
    # Export the nodes
    # Bundle the graph handles callers need into a namedtuple pulled from locals().
    export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
                    'keep_prob', 'cost', 'preds', 'optimizer', 'merged']
    Graph = namedtuple('Graph', export_nodes)
    local_dict = locals()
    graph = Graph(*[local_dict[each] for each in export_nodes])
    return graph
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
!mkdir -p checkpoints/anna
epochs = 1
save_every_n = 100
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter('./logs/2/train', sess.graph)
test_writer = tf.summary.FileWriter('./logs/2/test')
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/anna20.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 0.5,
model.initial_state: new_state}
summary, batch_loss, new_state, _ = sess.run([model.merged, model.cost,
model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
train_writer.add_summary(summary, iteration)
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
summary, batch_loss, new_state = sess.run([model.merged, model.cost,
model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
test_writer.add_summary(summary, iteration)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
#saver.save(sess, "checkpoints/anna/i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints/anna')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
prime = "Far"
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use.
Step3: Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
Step4: I'll write another function to grab batches out of the arrays made by split data. Here each batch will be a sliding window on these arrays with size batch_size X num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
Step5: Hyperparameters
Step6: Training
Step7: Sampling
|
7,894
|
<ASSISTANT_TASK:>
Python Code:
try:
import pint
except ImportError:
!pip install pint
import pint
try:
from modsim import *
except ImportError:
!pip install modsimpy
from modsim import *
!python --version
!jupyter-notebook --version
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
meter = UNITS.meter
second = UNITS.second
a = 9.8 * meter / second**2
t = 4 * second
a * t**2 / 2
# Solution
a * t
# Solution
# a + t
h = 381 * meter
t = sqrt(2 * h / a)
v = a * t
mile = UNITS.mile
hour = UNITS.hour
v.to(mile/hour)
# Solution
foot = UNITS.foot
pole_height = 10 * foot
h + pole_height
# Solution
pole_height + h
# Solution
v_terminal = 18 * meter / second
# Solution
t1 = v_terminal / a
print('Time to reach terminal velocity', t1)
# Solution
h1 = a * t1**2 / 2
print('Height fallen in t1', h1)
# Solution
t2 = (h - h1) / v_terminal
print('Time to fall remaining distance', t2)
# Solution
t_total = t1 + t2
print('Total falling time', t_total)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first time you run this on a new installation of Python, it might produce a warning message in pink. That's probably ok, but if you get a message that says modsim.py depends on Python 3.7 features, that means you have an older version of Python, and some features in modsim.py won't work correctly.
Step2: Configuring Jupyter
Step3: The penny myth
Step4: To find out what other units are defined, type UNITS. (including the period) in the next cell and then press TAB. You should see a pop-up menu with a list of units.
Step5: Create t and give it the value 4 seconds.
Step6: Compute the distance a penny would fall after t seconds with constant acceleration a. Notice that the units of the result are correct.
Step7: Exercise
Step8: Exercise
Step9: The error messages you get from Python are big and scary, but if you read them carefully, they contain a lot of useful information.
Step10: Compute the time it would take a penny to fall, assuming constant acceleration.
Step11: Given t, we can compute the velocity of the penny when it lands.
Step12: We can convert from one set of units to another like this
Step13: Exercise
Step14: Exercise
|
7,895
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
outcome_0 = np.array([1.0, 0.0])
outcome_1 = np.array([0.0, 1.0])
a = 0.75
b = 0.25
prob_bit = a*outcome_0 + b*outcome_1
X,Y = prob_bit
plt.figure()
ax = plt.gca()
ax.quiver(X,Y,angles='xy',scale_units='xy',scale=1)
ax.set_xlim([0,1])
ax.set_ylim([0,1])
plt.draw()
plt.show()
# imports for pyQuil (ignore for now)
from pyquil.quil import Program
import pyquil.forest as qvm
quantum_simulator = qvm.Connection()
import numpy as np
# pyQuil is based around operations (or gates) so we'll start with the most
# basic one: the identity operation, called I. I takes one argument, the index
# of the qubit that it should be applied to.
from pyquil.gates import I
# make a quantum program that allocates one qubit (qubit #0) and does nothing to it
p = Program(I(0))
# quantum states are called wavefunctions for historical reasons
# so we can run this basic program on our connection to the simulator.
# This call will return the state of our qubits after we run program p.
alpha, beta = quantum_simulator.wavefunction(p)
print "Our qubit is in the state alpha={} and beta={}".format(alpha, beta)
print "The probability of measuring the qubit in outcome 0 is {}".format(np.conj(alpha)*alpha)
print "The probability of measuring the qubit in outcome 1 is {}".format(np.conj(beta)*beta)
# we can import the qubit "flip" operation (called X), which we'll talk about in the next section
# and see what that does.
from pyquil.gates import X
p = Program(X(0))
alpha, beta = quantum_simulator.wavefunction(p)
print "Our qubit is in the state alpha={} and beta={}".format(alpha, beta)
print "The probability of measuring the qubit in outcome 0 is {}".format(np.conj(alpha)*alpha)
print "The probability of measuring the qubit in outcome 1 is {}".format(np.conj(beta)*beta)
# multiple qubits also produce the expected scaling of the state
p = Program(I(0), I(1))
print "The quantum state is of dimension:", len(quantum_simulator.wavefunction(p))
p = Program(I(0), I(1), I(2), I(3))
print "The quantum state is of dimension:", len(quantum_simulator.wavefunction(p))
p = Program()
for x in range(10):
p.inst(I(x))
print "The quantum state is of dimension:", len(quantum_simulator.wavefunction(p))
# wavefunction(Program) returns a coefficient array that corresponds to outcomes in the following order
print quantum_simulator.probabilities(Program(I(0), I(1)))
p = Program(I(0))
quantum_simulator.wavefunction(p)
from pyquil.gates import X, Y, Z
p = Program(X(0))
print "X|0> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p), "This looks like a bit flip.\n"
p = Program(Y(0))
print "Y|0> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
p = Program(Z(0))
print "Z|0> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
# composing qubit operations is the same as multiplying multiple matrices in sequence
p = Program(X(0), Y(0), Z(0))
print "ZYX|0> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
from pyquil.gates import CNOT
p = Program(CNOT(0, 1))
print "CNOT|00> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
p = Program(X(0), CNOT(0, 1))
print "CNOT|01> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
p = Program(X(1), CNOT(0, 1))
print "CNOT|10> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
p = Program(X(0), X(1), CNOT(0, 1))
print "CNOT|11> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
from pyquil.gates import SWAP
print "SWAP|01> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
# create a program that stores the outcome of measuring qubit #0 into classical register [0]
classical_register_index = 0
p = Program(I(0)).measure(0, classical_register_index)
# choose what classical registers to look in at the end of the computation
classical_regs = [0, 1]
quantum_simulator.run(p, classical_regs)
classical_register_index = 0
p = Program(X(0)) # flip the qubit
p.measure(0, classical_register_index) # measure the qubit
classical_regs = [0, 1]
quantum_simulator.run(p, classical_regs)
classical_register_index = 0
p = Program(X(0)) # flip the qubit
p.measure(0, classical_register_index) # measure the qubit
classical_regs = [0]
trials = 10
quantum_simulator.run(p, classical_regs, trials)
from pyquil.gates import H
# The Hadamard produces what is called a superposition state
coin_program = Program(H(0))
print "H|0> = ", quantum_simulator.wavefunction(coin_program)
print "With outcome probabilities\n", quantum_simulator.probabilities(coin_program)
# introduce measurement
classical_reg = 0
coin_program = Program(H(0)).measure(0, classical_reg)
trials = 10
# we see probabilistic results of about half 1's and half 0's
quantum_simulator.run(coin_program, [0], trials)
classical_reg = 0
coin_program = Program(H(0))
print "Before measurement: H|0> = ", quantum_simulator.wavefunction(coin_program)
coin_program.measure(0, classical_reg)
for x in range(5):
print "After measurement: ", quantum_simulator.wavefunction(coin_program)
# This happens with bigger systems too
classical_reg = 0
# this program prepares something called a Bell state (a special kind of "entangled state")
bell_program = Program(H(0), CNOT(0, 1))
print "Before measurement: H|0> = ", quantum_simulator.wavefunction(bell_program)
bell_program.measure(0, classical_reg)
for x in range(5):
print "After measurement: ", quantum_simulator.probabilities(bell_program)
if_prog = Program(X(7)) # if branch
else_prog = Program(I(7)) # else branch
p = Program(X(0)).measure(0, 1).quil_if(1, if_prog, else_prog) # branch on classical reg [1]
p.measure(7, 7) # measure qubit #7 into classical register [7]
quantum_simulator.run(p, [7]) # run and check register [7]
inside_loop = Program(H(0)).measure(0, 1)
p = Program().quil_while(1, inside_loop)
quantum_simulator.run(p, [1]) # run and check register [1]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Given some state vector, like the one plotted above, we can find the probabilities associated to each outcome by projecting the vector onto the basis outcomes. This gives us a rule like the following
Step2: Qubit operations
Step3: Other standard gates on single qubits are given by the Pauli operator matrices
Step4: Operations can also be applied to composite states of multiple qubits. One common example is the controlled-not or CNOT gate that works on two qubits. Its matrix form is
Step5: Another example is the SWAP gate
Step6: Quantum computing operations are then composed of a series of these complex matrices being applied to complex vectors. The reason that these matrices have to be unitary (i.e. that their complex conjugate transpose is their inverse) is that this preserves that the overall probability of all outcomes always sums to one.
Step7: So far we have used the quantum simulator to cheat a little bit. We have actually looked at the wavefunction that comes back, when, on real quantum hardware, we are unable to directly look at the wavefunction. Instead we only have access to the classical bits that are affected by measurements. This functionality is emulated by the qvm_run command.
Step8: We see that both registers are zero. However, if we had flipped the qubit before measurement then we obtain
Step9: These measurements are deterministic, e.g. if we make them multiple times the we always get the same outcome
Step10: However this is not the case in general. Looking at those examples will allow us to understand the way that measurements affect the quantum state as well. We will see that measurements act like projections onto the outcome basis states. First we introduce the Hadamard matrix
Step11: A qubit in this state will half the time be measured to be in 0 and half the time be measured to be in 1. In essence this qubit truly is a random variable representing a coin. In fact, there are many wavefunctions that will give this same operational outcome. There is a continuous family of states of the form
Step12: Another interesting thing to do is to look at the wavefunction AFTER a measurement.
Step13: We can clearly see that measurement has an effect on the quantum state independent of what is stored classically. We begin in a state that has a 50-50 probability of being one or zero. After measurement, the state changes into being 100% 0 or 100% 1 depending on which outcome was obtained. This is the phenomemon reffered to as wavefunction "collapse". Mathematically, the wavefunction is being projected onto the vector of the obtained outcome.
Step14: The above program prepares "entanglement" because even though their are random outcomes, after all measurement both qubits are the same. They are either both 0 or both 1. This kind of special correllation is the sort of thing quantum mechanics is famous for.
Step15: A [1] here means that qubit 7 was indeed flipped. ##DIAGRAMS
|
7,896
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time, os, json
import numpy as np
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
from cs231n.classifiers.pretrained_cnn import PretrainedCNN
from cs231n.data_utils import load_tiny_imagenet
from cs231n.image_utils import blur_image, deprocess_image, preprocess_image
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
data = load_tiny_imagenet('cs231n/datasets/tiny-imagenet-100-A', subtract_mean=True)
model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')
def create_class_visualization(target_y, model, **kwargs):
Perform optimization over the image to generate class visualizations.
Inputs:
- target_y: Integer in the range [0, 100) giving the target class
- model: A PretrainedCNN that will be used for generation
Keyword arguments:
- learning_rate: Floating point number giving the learning rate
- blur_every: An integer; how often to blur the image as a regularizer
- l2_reg: Floating point number giving L2 regularization strength on the image;
this is lambda in the equation above.
- max_jitter: How much random jitter to add to the image as regularization
- num_iterations: How many iterations to run for
- show_every: How often to show the image
learning_rate = kwargs.pop('learning_rate', 10000)
blur_every = kwargs.pop('blur_every', 1)
l2_reg = kwargs.pop('l2_reg', 1e-6)
max_jitter = kwargs.pop('max_jitter', 4)
num_iterations = kwargs.pop('num_iterations', 100)
show_every = kwargs.pop('show_every', 25)
X = np.random.randn(1, 3, 64, 64)
for t in xrange(num_iterations):
# As a regularizer, add random jitter to the image
ox, oy = np.random.randint(-max_jitter, max_jitter+1, 2)
X = np.roll(np.roll(X, ox, -1), oy, -2)
dX = None
############################################################################
# TODO: Compute the image gradient dX of the image with respect to the #
# target_y class score. This should be similar to the fooling images. Also #
# add L2 regularization to dX and update the image X using the image #
# gradient and the learning rate. #
############################################################################
pass
############################################################################
# END OF YOUR CODE #
############################################################################
# Undo the jitter
X = np.roll(np.roll(X, -ox, -1), -oy, -2)
# As a regularizer, clip the image
X = np.clip(X, -data['mean_image'], 255.0 - data['mean_image'])
# As a regularizer, periodically blur the image
if t % blur_every == 0:
X = blur_image(X)
# Periodically show the image
if t % show_every == 0:
plt.imshow(deprocess_image(X, data['mean_image']))
plt.gcf().set_size_inches(3, 3)
plt.axis('off')
plt.show()
return X
target_y = 43 # Tarantula
print data['class_names'][target_y]
X = create_class_visualization(target_y, model, show_every=25)
def invert_features(target_feats, layer, model, **kwargs):
Perform feature inversion in the style of Mahendran and Vedaldi 2015, using
L2 regularization and periodic blurring.
Inputs:
- target_feats: Image features of the target image, of shape (1, C, H, W);
we will try to generate an image that matches these features
- layer: The index of the layer from which the features were extracted
- model: A PretrainedCNN that was used to extract features
Keyword arguments:
- learning_rate: The learning rate to use for gradient descent
- num_iterations: The number of iterations to use for gradient descent
- l2_reg: The strength of L2 regularization to use; this is lambda in the
equation above.
- blur_every: How often to blur the image as implicit regularization; set
to 0 to disable blurring.
- show_every: How often to show the generated image; set to 0 to disable
showing intermediate reuslts.
Returns:
- X: Generated image of shape (1, 3, 64, 64) that matches the target features.
learning_rate = kwargs.pop('learning_rate', 10000)
num_iterations = kwargs.pop('num_iterations', 500)
l2_reg = kwargs.pop('l2_reg', 1e-7)
blur_every = kwargs.pop('blur_every', 1)
show_every = kwargs.pop('show_every', 50)
X = np.random.randn(1, 3, 64, 64)
for t in xrange(num_iterations):
############################################################################
# TODO: Compute the image gradient dX of the reconstruction loss with #
# respect to the image. You should include L2 regularization penalizing #
# large pixel values in the generated image using the l2_reg parameter; #
# then update the generated image using the learning_rate from above. #
############################################################################
pass
############################################################################
# END OF YOUR CODE #
############################################################################
# As a regularizer, clip the image
X = np.clip(X, -data['mean_image'], 255.0 - data['mean_image'])
# As a regularizer, periodically blur the image
if (blur_every > 0) and t % blur_every == 0:
X = blur_image(X)
if (show_every > 0) and (t % show_every == 0 or t + 1 == num_iterations):
plt.imshow(deprocess_image(X, data['mean_image']))
plt.gcf().set_size_inches(3, 3)
plt.axis('off')
plt.title('t = %d' % t)
plt.show()
filename = 'kitten.jpg'
layer = 3 # layers start from 0 so these are features after 4 convolutions
img = imresize(imread(filename), (64, 64))
plt.imshow(img)
plt.gcf().set_size_inches(3, 3)
plt.title('Original image')
plt.axis('off')
plt.show()
# Preprocess the image before passing it to the network:
# subtract the mean, add a dimension, etc
img_pre = preprocess_image(img, data['mean_image'])
# Extract features from the image
feats, _ = model.forward(img_pre, end=layer)
# Invert the features
kwargs = {
'num_iterations': 400,
'learning_rate': 5000,
'l2_reg': 1e-8,
'show_every': 100,
'blur_every': 10,
}
X = invert_features(feats, layer, model, **kwargs)
filename = 'kitten.jpg'
layer = 6 # layers start from 0 so these are features after 7 convolutions
img = imresize(imread(filename), (64, 64))
plt.imshow(img)
plt.gcf().set_size_inches(3, 3)
plt.title('Original image')
plt.axis('off')
plt.show()
# Preprocess the image before passing it to the network:
# subtract the mean, add a dimension, etc
img_pre = preprocess_image(img, data['mean_image'])
# Extract features from the image
feats, _ = model.forward(img_pre, end=layer)
# Invert the features
# You will need to play with these parameters.
kwargs = {
'num_iterations': 1000,
'learning_rate': 0,
'l2_reg': 0,
'show_every': 100,
'blur_every': 0,
}
X = invert_features(feats, layer, model, **kwargs)
def deepdream(X, layer, model, **kwargs):
Generate a DeepDream image.
Inputs:
- X: Starting image, of shape (1, 3, H, W)
- layer: Index of layer at which to dream
- model: A PretrainedCNN object
Keyword arguments:
- learning_rate: How much to update the image at each iteration
- max_jitter: Maximum number of pixels for jitter regularization
- num_iterations: How many iterations to run for
- show_every: How often to show the generated image
X = X.copy()
learning_rate = kwargs.pop('learning_rate', 5.0)
max_jitter = kwargs.pop('max_jitter', 16)
num_iterations = kwargs.pop('num_iterations', 100)
show_every = kwargs.pop('show_every', 25)
for t in xrange(num_iterations):
# As a regularizer, add random jitter to the image
ox, oy = np.random.randint(-max_jitter, max_jitter+1, 2)
X = np.roll(np.roll(X, ox, -1), oy, -2)
dX = None
############################################################################
# TODO: Compute the image gradient dX using the DeepDream method. You'll #
# need to use the forward and backward methods of the model object to #
# extract activations and set gradients for the chosen layer. After #
# computing the image gradient dX, you should use the learning rate to #
# update the image X. #
############################################################################
pass
############################################################################
# END OF YOUR CODE #
############################################################################
# Undo the jitter
X = np.roll(np.roll(X, -ox, -1), -oy, -2)
# As a regularizer, clip the image
mean_pixel = data['mean_image'].mean(axis=(1, 2), keepdims=True)
X = np.clip(X, -mean_pixel, 255.0 - mean_pixel)
# Periodically show the image
if t == 0 or (t + 1) % show_every == 0:
img = deprocess_image(X, data['mean_image'], mean='pixel')
plt.imshow(img)
plt.title('t = %d' % (t + 1))
plt.gcf().set_size_inches(8, 8)
plt.axis('off')
plt.show()
return X
def read_image(filename, max_size):
Read an image from disk and resize it so its larger side is max_size
img = imread(filename)
H, W, _ = img.shape
if H >= W:
img = imresize(img, (max_size, int(W * float(max_size) / H)))
elif H < W:
img = imresize(img, (int(H * float(max_size) / W), max_size))
return img
filename = 'kitten.jpg'
max_size = 256
img = read_image(filename, max_size)
plt.imshow(img)
plt.axis('off')
# Preprocess the image by converting to float, transposing,
# and performing mean subtraction.
img_pre = preprocess_image(img, data['mean_image'], mean='pixel')
out = deepdream(img_pre, 7, model, learning_rate=2000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TinyImageNet and pretrained model
Step3: # Class visualization
Step4: You can use the code above to generate some cool images! An example is shown below. Try to generate a cool-looking image. If you want you can try to implement the other regularization schemes from Yosinski et al, but it isn't required.
Step6: Feature Inversion
Step7: Shallow feature reconstruction
Step8: Deep feature reconstruction
Step10: DeepDream
Step12: Generate some images!
|
7,897
|
<ASSISTANT_TASK:>
Python Code:
def f(x):
return x**3 + 4*x**2 -3
x = np.linspace(-3.1, 0, 100)
plt.plot(x, x**3 + 4*x**2 -3)
a = -3.0
b = -0.5
c = 0.5*(a+b)
plt.text(a,-1,"a")
plt.text(b,-1,"b")
plt.text(c,-1,"c")
plt.scatter([a,b,c], [f(a), f(b),f(c)], s=50, facecolors='none')
plt.scatter([a,b,c], [0,0,0], s=50, c='red')
xaxis = plt.axhline(0);
x = np.linspace(-3.1, 0, 100)
plt.plot(x, x**3 + 4*x**2 -3)
d = 0.5*(b+c)
plt.text(d,-1,"d")
plt.text(b,-1,"b")
plt.text(c,-1,"c")
plt.scatter([d,b,c], [f(d), f(b),f(c)], s=50, facecolors='none')
plt.scatter([d,b,c], [0,0,0], s=50, c='red')
xaxis = plt.axhline(0);
def f(x):
return (x**3-2*x+7)/(x**4+2)
x = np.arange(-3,5, 0.1);
y = f(x)
p1=plt.plot(x, y)
plt.xlim(-3, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
t = np.arange(-10, 5., 0.1)
x0=-1.2
x1=-0.5
xvals = []
xvals.append(x0)
xvals.append(x1)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--']
while (notconverge==1 and count < 3):
slope=(f(xvals[count+1])-f(xvals[count]))/(xvals[count+1]-xvals[count])
intercept=-slope*xvals[count+1]+f(xvals[count+1])
plt.plot(t, slope*t + intercept, cols[count])
nextval = -intercept/slope
if abs(f(nextval)) < 0.001:
notconverge=0
else:
xvals.append(nextval)
count = count+1
plt.show()
x = np.arange(-5,5, 0.1);
y = (x**3-2*x+7)/(x**4+2)
p1=plt.plot(x, y)
plt.xlim(-4, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
plt.title('Example Function')
plt.show()
x = np.arange(-5,5, 0.1);
y = (x**3-2*x+7)/(x**4+2)
p1=plt.plot(x, y)
plt.xlim(-4, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
plt.title('Good Guess')
t = np.arange(-5, 5., 0.1)
x0=-1.5
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
while (notconverge==1 and count < 6):
funval=(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
slope=-((4*xvals[count]**3 *(7 - 2 *xvals[count] + xvals[count]**3))/(2 + xvals[count]**4)**2) + (-2 + 3 *xvals[count]**2)/(2 + xvals[count]**4)
intercept=-slope*xvals[count]+(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
plt.plot(t, slope*t + intercept, cols[count])
nextval = -intercept/slope
if abs(funval) < 0.01:
notconverge=0
else:
xvals.append(nextval)
count = count+1
plt.show()
funval
x = np.arange(-5,5, 0.1);
y = (x**3-2*x+7)/(x**4+2)
p1=plt.plot(x, y)
plt.xlim(-4, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
plt.title('Bad Guess')
t = np.arange(-5, 5., 0.1)
x0=-0.5
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
while (notconverge==1 and count < 6):
funval=(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
slope=-((4*xvals[count]**3 *(7 - 2 *xvals[count] + xvals[count]**3))/(2 + xvals[count]**4)**2) + (-2 + 3 *xvals[count]**2)/(2 + xvals[count]**4)
intercept=-slope*xvals[count]+(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
plt.plot(t, slope*t + intercept, cols[count])
nextval = -intercept/slope
if abs(funval) < 0.01:
notconverge = 0
else:
xvals.append(nextval)
count = count+1
plt.show()
def f(x):
return x**3 - 2*x**2 - 11*x +12
def s(x):
return 3*x**2 - 4*x - 11
x = np.arange(-5,5, 0.1);
p1=plt.plot(x, f(x))
plt.xlim(-4, 5)
plt.ylim(-20, 22)
plt.xlabel('x')
plt.axhline(0)
plt.title('Basin of Attraction')
t = np.arange(-5, 5., 0.1)
x0=2.43
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
while (notconverge==1 and count < 6):
funval = f(xvals[count])
slope = s(xvals[count])
intercept=-slope*xvals[count]+funval
plt.plot(t, slope*t + intercept, cols[count])
nextval = -intercept/slope
if abs(funval) < 0.01:
notconverge = 0
else:
xvals.append(nextval)
count = count+1
plt.show()
xvals[count-1]
p1=plt.plot(x, f(x))
plt.xlim(-4, 5)
plt.ylim(-20, 22)
plt.xlabel('x')
plt.axhline(0)
plt.title('Basin of Attraction')
t = np.arange(-5, 5., 0.1)
x0=2.349
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
while (notconverge==1 and count < 6):
funval = f(xvals[count])
slope = s(xvals[count])
intercept=-slope*xvals[count]+funval
plt.plot(t, slope*t + intercept, cols[count])
nextval = -intercept/slope
if abs(funval) < 0.01:
notconverge = 0
else:
xvals.append(nextval)
count = count+1
plt.show()
xvals[count-1]
def f(x):
return (x - 2) * x * (x + 2)**2
x = np.arange(-5,5, 0.1);
plt.plot(x, f(x))
plt.xlim(-3.5, 0.5)
plt.ylim(-5, 16)
plt.xlabel('x')
plt.axhline(0)
plt.title("Quadratic Interpolation")
#First Interpolation
x0=np.array([-3,-2.5,-1.0])
y0=f(x0)
f2 = interp1d(x0, y0,kind='quadratic')
#Plot parabola
xs = np.linspace(-3, -1, num=10000, endpoint=True)
plt.plot(xs, f2(xs))
#Plot first triplet
plt.plot(x0, f(x0),'ro');
plt.scatter(x0, f(x0), s=50, c='yellow');
#New x value
xnew=xs[np.where(abs(f2(xs))==min(abs(f2(xs))))]
plt.scatter(np.append(xnew,xnew), np.append(0,f(xnew)), c='black');
#New triplet
x1=np.append([-3,-2.5],xnew)
y1=f(x1)
f2 = interp1d(x1, y1,kind='quadratic')
#New Parabola
xs = np.linspace(min(x1), max(x1), num=100, endpoint=True)
plt.plot(xs, f2(xs))
xnew=xs[np.where(abs(f2(xs))==min(abs(f2(xs))))]
plt.scatter(np.append(xnew,xnew), np.append(0,f(xnew)), c='green');
x = np.arange(-5,5, 0.1);
plt.plot(x, f(x))
plt.xlim(-3.5, 0.5)
plt.ylim(-5, 16)
plt.xlabel('x')
plt.axhline(0)
plt.title("Inverse Quadratic Interpolation")
#First Interpolation
x0=np.array([-3,-2.5,1])
y0=f(x0)
f2 = interp1d(y0, x0,kind='quadratic')
#Plot parabola
xs = np.linspace(min(f(x0)), max(f(x0)), num=10000, endpoint=True)
plt.plot(f2(xs), xs)
#Plot first triplet
plt.plot(x0, f(x0),'ro');
plt.scatter(x0, f(x0), s=50, c='yellow');
from scipy.optimize import minimize_scalar
def f(x):
return (x - 2) * x * (x + 2)**2
res = minimize_scalar(f)
res.x
x = np.arange(-5,5, 0.1);
p1=plt.plot(x, f(x))
plt.xlim(-4, 4)
plt.ylim(-10, 20)
plt.xlabel('x')
plt.axhline(0)
scipy.optimize.brentq(f,-1,.5)
scipy.optimize.brentq(f,.5,3)
scipy.optimize.newton(f,-3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can terminate the process whenever the function evaluated at the new midpoint is 'close enough' to zero. This method is an example of what are known as 'bracketed methods'. This means the root is 'bracketed' by the end-points (it is somewhere in between). Another class of methods are 'open methods' - the root need not be somewhere in between the end-points (but it usually needs to be close!)
Step2: The secant method has the advantage of fast convergence. While the bisection method has a linear convergence rate (i.e. error goes to zero at the rate that $h(x) = x$ goes to zero, the secant method has a convergence rate that is faster than linear, but not quite quadratic (i.e. $\sim x^\alpha$, where $\alpha = \frac{1+\sqrt{5}}2 \approx 1.6$)
Step3: From the graph, we see the zero is near -2. We make an initial guess of $$x=-1.5$$
Step4: In fact, the Newton-Rhapson method converges quadratically. However, NR (and the secant method) have a fatal flaw
Step5: We have stumbled on the horizontal asymptote. The algorithm fails to converge.
Step6: Convergence Rate
Step7: So that's the idea behind quadratic interpolation. Use a quadratic approximation, find the zero of interest, use that as a new point for the next quadratic approximation.
Step8: Convergence rate is approximately $1.8$. The advantage of the inverse method is that we will always have a real root (the parabola will always cross the x-axis). A serious disadvantage is that the initial points must be very close to the root or the method may not converge.
Step9: To find zeroes, use
|
7,898
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
Example:
- Minimize Rosenbrock's Function with Nelder-Mead.
- Plot of parameter convergence to function minimum.
Demonstrates:
- standard models
- minimal solver interface
- parameter trajectories using retall
# Nelder-Mead solver
from mystic.solvers import fmin
# Rosenbrock function
from mystic.models import rosen
# tools
import pylab
if __name__ == '__main__':
# initial guess
x0 = [0.8,1.2,0.7]
# use Nelder-Mead to minimize the Rosenbrock function
solution = fmin(rosen, x0, disp=0, retall=1)
allvecs = solution[-1]
# plot the parameter trajectories
pylab.plot([i[0] for i in allvecs])
pylab.plot([i[1] for i in allvecs])
pylab.plot([i[2] for i in allvecs])
# draw the plot
pylab.title("Rosenbrock parameter convergence")
pylab.xlabel("Nelder-Mead solver iterations")
pylab.ylabel("parameter value")
pylab.legend(["x", "y", "z"])
pylab.show()
Example:
- Minimize Rosenbrock's Function with Nelder-Mead.
- Dynamic plot of parameter convergence to function minimum.
Demonstrates:
- standard models
- minimal solver interface
- parameter trajectories using callback
- solver interactivity
# Nelder-Mead solver
from mystic.solvers import fmin
# Rosenbrock function
from mystic.models import rosen
# tools
from mystic.tools import getch
import pylab
pylab.ion()
# draw the plot
def plot_frame():
pylab.title("Rosenbrock parameter convergence")
pylab.xlabel("Nelder-Mead solver iterations")
pylab.ylabel("parameter value")
pylab.draw()
return
iter = 0
step, xval, yval, zval = [], [], [], []
# plot the parameter trajectories
def plot_params(params):
global iter, step, xval, yval, zval
step.append(iter)
xval.append(params[0])
yval.append(params[1])
zval.append(params[2])
pylab.plot(step,xval,'b-')
pylab.plot(step,yval,'g-')
pylab.plot(step,zval,'r-')
pylab.legend(["x", "y", "z"])
pylab.draw()
iter += 1
return
if __name__ == '__main__':
# initial guess
x0 = [0.8,1.2,0.7]
# suggest that the user interacts with the solver
print("NOTE: while solver is running, press 'Ctrl-C' in console window")
getch()
plot_frame()
# use Nelder-Mead to minimize the Rosenbrock function
solution = fmin(rosen, x0, disp=1, callback=plot_params, handler=True)
print(solution)
# don't exit until user is ready
getch()
Example:
- Minimize Rosenbrock's Function with Powell's method.
- Dynamic print of parameter convergence to function minimum.
Demonstrates:
- standard models
- minimal solver interface
- parameter trajectories using callback
# Powell's Directonal solver
from mystic.solvers import fmin_powell
# Rosenbrock function
from mystic.models import rosen
iter = 0
# plot the parameter trajectories
def print_params(params):
global iter
from numpy import asarray
print("Generation %d has best fit parameters: %s" % (iter,asarray(params)))
iter += 1
return
if __name__ == '__main__':
# initial guess
x0 = [0.8,1.2,0.7]
print_params(x0)
# use Powell's method to minimize the Rosenbrock function
solution = fmin_powell(rosen, x0, disp=1, callback=print_params, handler=False)
print(solution)
Example:
- Minimize Rosenbrock's Function with Powell's method.
Demonstrates:
- standard models
- minimal solver interface
- customized monitors
# Powell's Directonal solver
from mystic.solvers import fmin_powell
# Rosenbrock function
from mystic.models import rosen
# tools
from mystic.monitors import VerboseLoggingMonitor
if __name__ == '__main__':
print("Powell's Method")
print("===============")
# initial guess
x0 = [1.5, 1.5, 0.7]
# configure monitor
stepmon = VerboseLoggingMonitor(1,1)
# use Powell's method to minimize the Rosenbrock function
solution = fmin_powell(rosen, x0, itermon=stepmon)
print(solution)
import mystic
mystic.log_reader('log.txt')
import mystic
mystic.model_plotter(mystic.models.rosen, 'log.txt', kwds='-d -x 1 -b "-2:2:.1, -2:2:.1, 1"')
Example:
- Solve 8th-order Chebyshev polynomial coefficients with DE.
- Callable plot of fitting to Chebyshev polynomial.
- Monitor Chi-Squared for Chebyshev polynomial.
Demonstrates:
- standard models
- expanded solver interface
- built-in random initial guess
- customized monitors and termination conditions
- customized DE mutation strategies
- use of monitor to retrieve results information
# Differential Evolution solver
from mystic.solvers import DifferentialEvolutionSolver2
# Chebyshev polynomial and cost function
from mystic.models.poly import chebyshev8, chebyshev8cost
from mystic.models.poly import chebyshev8coeffs
# tools
from mystic.termination import VTR
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor
from mystic.tools import getch, random_seed
from mystic.math import poly1d
import pylab
pylab.ion()
# draw the plot
def plot_exact():
pylab.title("fitting 8th-order Chebyshev polynomial coefficients")
pylab.xlabel("x")
pylab.ylabel("f(x)")
import numpy
x = numpy.arange(-1.2, 1.2001, 0.01)
exact = chebyshev8(x)
pylab.plot(x,exact,'b-')
pylab.legend(["Exact"])
pylab.axis([-1.4,1.4,-2,8],'k-')
pylab.draw()
return
# plot the polynomial
def plot_solution(params,style='y-'):
import numpy
x = numpy.arange(-1.2, 1.2001, 0.01)
f = poly1d(params)
y = f(x)
pylab.plot(x,y,style)
pylab.legend(["Exact","Fitted"])
pylab.axis([-1.4,1.4,-2,8],'k-')
pylab.draw()
return
if __name__ == '__main__':
print("Differential Evolution")
print("======================")
# set range for random initial guess
ndim = 9
x0 = [(-100,100)]*ndim
random_seed(123)
# draw frame and exact coefficients
plot_exact()
# configure monitor
stepmon = VerboseMonitor(50)
# use DE to solve 8th-order Chebyshev coefficients
npop = 10*ndim
solver = DifferentialEvolutionSolver2(ndim,npop)
solver.SetRandomInitialPoints(min=[-100]*ndim, max=[100]*ndim)
solver.SetGenerationMonitor(stepmon)
solver.enable_signal_handler()
solver.Solve(chebyshev8cost, termination=VTR(0.01), strategy=Best1Exp, \
CrossProbability=1.0, ScalingFactor=0.9, \
sigint_callback=plot_solution)
solution = solver.Solution()
# use monitor to retrieve results information
iterations = len(stepmon)
cost = stepmon.y[-1]
print("Generation %d has best Chi-Squared: %f" % (iterations, cost))
# use pretty print for polynomials
print(poly1d(solution))
# compare solution with actual 8th-order Chebyshev coefficients
print("\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs))
# plot solution versus exact coefficients
plot_solution(solution)
from mystic.solvers import DifferentialEvolutionSolver
print("\n".join([i for i in dir(DifferentialEvolutionSolver) if not i.startswith('_')]))
from mystic.termination import VTR, ChangeOverGeneration, And, Or
stop = Or(And(VTR(), ChangeOverGeneration()), VTR(1e-8))
from mystic.models import rosen
from mystic.monitors import VerboseMonitor
from mystic.solvers import DifferentialEvolutionSolver
solver = DifferentialEvolutionSolver(3,40)
solver.SetRandomInitialPoints([-10,-10,-10],[10,10,10])
solver.SetGenerationMonitor(VerboseMonitor(10))
solver.SetTermination(stop)
solver.SetObjective(rosen)
solver.SetStrictRanges([-10,-10,-10],[10,10,10])
solver.SetEvaluationLimits(generations=600)
solver.Solve()
print(solver.bestSolution)
from mystic.solvers import DifferentialEvolutionSolver
from mystic.math import Distribution
import numpy as np
import pylab
# build a mystic distribution instance
dist = Distribution(np.random.normal, 5, 1)
# use the distribution instance as the initial population
solver = DifferentialEvolutionSolver(3,20)
solver.SetSampledInitialPoints(dist)
# visualize the initial population
pylab.hist(np.array(solver.population).ravel())
pylab.show()
from mystic.constraints import *
from mystic.penalty import quadratic_equality
from mystic.coupler import inner
from mystic.math import almostEqual
from mystic.tools import random_seed
random_seed(213)
def test_penalize():
from mystic.math.measures import mean, spread
def mean_constraint(x, target):
return mean(x) - target
def range_constraint(x, target):
return spread(x) - target
@quadratic_equality(condition=range_constraint, kwds={'target':5.0})
@quadratic_equality(condition=mean_constraint, kwds={'target':5.0})
def penalty(x):
return 0.0
def cost(x):
return abs(sum(x) - 5.0)
from mystic.solvers import fmin
from numpy import array
x = array([1,2,3,4,5])
y = fmin(cost, x, penalty=penalty, disp=False)
assert round(mean(y)) == 5.0
assert round(spread(y)) == 5.0
assert round(cost(y)) == 4*(5.0)
def test_solve():
from mystic.math.measures import mean
def mean_constraint(x, target):
return mean(x) - target
def parameter_constraint(x):
return x[-1] - x[0]
@quadratic_equality(condition=mean_constraint, kwds={'target':5.0})
@quadratic_equality(condition=parameter_constraint)
def penalty(x):
return 0.0
x = solve(penalty, guess=[2,3,1])
assert round(mean_constraint(x, 5.0)) == 0.0
assert round(parameter_constraint(x)) == 0.0
assert issolution(penalty, x)
def test_solve_constraint():
from mystic.math.measures import mean
@with_mean(1.0)
def constraint(x):
x[-1] = x[0]
return x
x = solve(constraint, guess=[2,3,1])
assert almostEqual(mean(x), 1.0, tol=1e-15)
assert x[-1] == x[0]
assert issolution(constraint, x)
def test_as_constraint():
from mystic.math.measures import mean, spread
def mean_constraint(x, target):
return mean(x) - target
def range_constraint(x, target):
return spread(x) - target
@quadratic_equality(condition=range_constraint, kwds={'target':5.0})
@quadratic_equality(condition=mean_constraint, kwds={'target':5.0})
def penalty(x):
return 0.0
ndim = 3
constraints = as_constraint(penalty, solver='fmin')
#XXX: this is expensive to evaluate, as there are nested optimizations
from numpy import arange
x = arange(ndim)
_x = constraints(x)
assert round(mean(_x)) == 5.0
assert round(spread(_x)) == 5.0
assert round(penalty(_x)) == 0.0
def cost(x):
return abs(sum(x) - 5.0)
npop = ndim*3
from mystic.solvers import diffev
y = diffev(cost, x, npop, constraints=constraints, disp=False, gtol=10)
assert round(mean(y)) == 5.0
assert round(spread(y)) == 5.0
assert round(cost(y)) == 5.0*(ndim-1)
def test_as_penalty():
from mystic.math.measures import mean, spread
@with_spread(5.0)
@with_mean(5.0)
def constraint(x):
return x
penalty = as_penalty(constraint)
from numpy import array
x = array([1,2,3,4,5])
def cost(x):
return abs(sum(x) - 5.0)
from mystic.solvers import fmin
y = fmin(cost, x, penalty=penalty, disp=False)
assert round(mean(y)) == 5.0
assert round(spread(y)) == 5.0
assert round(cost(y)) == 4*(5.0)
def test_with_penalty():
from mystic.math.measures import mean, spread
@with_penalty(quadratic_equality, kwds={'target':5.0})
def penalty(x, target):
return mean(x) - target
def cost(x):
return abs(sum(x) - 5.0)
from mystic.solvers import fmin
from numpy import array
x = array([1,2,3,4,5])
y = fmin(cost, x, penalty=penalty, disp=False)
assert round(mean(y)) == 5.0
assert round(cost(y)) == 4*(5.0)
def test_with_mean():
from mystic.math.measures import mean, impose_mean
@with_mean(5.0)
def mean_of_squared(x):
return [i**2 for i in x]
from numpy import array
x = array([1,2,3,4,5])
y = impose_mean(5, [i**2 for i in x])
assert mean(y) == 5.0
assert mean_of_squared(x) == y
def test_with_mean_spread():
from mystic.math.measures import mean, spread, impose_mean, impose_spread
@with_spread(50.0)
@with_mean(5.0)
def constrained_squared(x):
return [i**2 for i in x]
from numpy import array
x = array([1,2,3,4,5])
y = impose_spread(50.0, impose_mean(5.0,[i**2 for i in x]))
assert almostEqual(mean(y), 5.0, tol=1e-15)
assert almostEqual(spread(y), 50.0, tol=1e-15)
assert constrained_squared(x) == y
def test_constrained_solve():
from mystic.math.measures import mean, spread
@with_spread(5.0)
@with_mean(5.0)
def constraints(x):
return x
def cost(x):
return abs(sum(x) - 5.0)
from mystic.solvers import fmin_powell
from numpy import array
x = array([1,2,3,4,5])
y = fmin_powell(cost, x, constraints=constraints, disp=False)
assert almostEqual(mean(y), 5.0, tol=1e-15)
assert almostEqual(spread(y), 5.0, tol=1e-15)
assert almostEqual(cost(y), 4*(5.0), tol=1e-6)
if __name__ == '__main__':
test_penalize()
test_solve()
test_solve_constraint()
test_as_constraint()
test_as_penalty()
test_with_penalty()
test_with_mean()
test_with_mean_spread()
test_constrained_solve()
from mystic.coupler import and_, or_, not_
from mystic.constraints import and_ as _and, or_ as _or, not_ as _not
if __name__ == '__main__':
import numpy as np
from mystic.penalty import linear_equality, quadratic_equality
from mystic.constraints import as_constraint
x = x1,x2,x3 = (5., 5., 1.)
f = f1,f2,f3 = (np.sum, np.prod, np.average)
k = 100
solver = 'fmin_powell' #'diffev'
ptype = quadratic_equality
# case #1: couple penalties into a single constraint
p1 = lambda x: abs(x1 - f1(x))
p2 = lambda x: abs(x2 - f2(x))
p3 = lambda x: abs(x3 - f3(x))
p = (p1,p2,p3)
p = [ptype(pi)(lambda x:0.) for pi in p]
penalty = and_(*p, k=k)
constraint = as_constraint(penalty, solver=solver)
x = [1,2,3,4,5]
x_ = constraint(x)
assert round(f1(x_)) == round(x1)
assert round(f2(x_)) == round(x2)
assert round(f3(x_)) == round(x3)
# case #2: couple constraints into a single constraint
from mystic.math.measures import impose_product, impose_sum, impose_mean
from mystic.constraints import as_penalty
from mystic import random_seed
random_seed(123)
t = t1,t2,t3 = (impose_sum, impose_product, impose_mean)
c1 = lambda x: t1(x1, x)
c2 = lambda x: t2(x2, x)
c3 = lambda x: t3(x3, x)
c = (c1,c2,c3)
k=1
solver = 'buckshot' #'diffev'
ptype = linear_equality #quadratic_equality
p = [as_penalty(ci, ptype) for ci in c]
penalty = and_(*p, k=k)
constraint = as_constraint(penalty, solver=solver)
x = [1,2,3,4,5]
x_ = constraint(x)
assert round(f1(x_)) == round(x1)
assert round(f2(x_)) == round(x2)
assert round(f3(x_)) == round(x3)
# etc: more coupling of constraints
from mystic.constraints import with_mean, discrete
@with_mean(5.0)
def meanie(x):
return x
@discrete(list(range(11)))
def integers(x):
return x
c = _and(integers, meanie)
x = c([1,2,3])
assert x == integers(x) == meanie(x)
x = c([9,2,3])
assert x == integers(x) == meanie(x)
x = c([0,-2,3])
assert x == integers(x) == meanie(x)
x = c([9,-200,344])
assert x == integers(x) == meanie(x)
c = _or(meanie, integers)
x = c([1.1234, 4.23412, -9])
assert x == meanie(x) and x != integers(x)
x = c([7.0, 10.0, 0.0])
assert x == integers(x) and x != meanie(x)
x = c([6.0, 9.0, 0.0])
assert x == integers(x) == meanie(x)
x = c([3,4,5])
assert x == integers(x) and x != meanie(x)
x = c([3,4,5.5])
assert x == meanie(x) and x != integers(x)
c = _not(integers)
x = c([1,2,3])
assert x != integers(x) and x != [1,2,3] and x == c(x)
x = c([1.1,2,3])
assert x != integers(x) and x == [1.1,2,3] and x == c(x)
c = _not(meanie)
x = c([1,2,3])
assert x != meanie(x) and x == [1,2,3] and x == c(x)
x = c([4,5,6])
assert x != meanie(x) and x != [4,5,6] and x == c(x)
c = _not(_and(meanie, integers))
x = c([4,5,6])
assert x != meanie(x) and x != integers(x) and x != [4,5,6] and x == c(x)
# etc: more coupling of penalties
from mystic.penalty import quadratic_inequality
p1 = lambda x: sum(x) - 5
p2 = lambda x: min(i**2 for i in x)
p = p1,p2
p = [quadratic_inequality(pi)(lambda x:0.) for pi in p]
p1,p2 = p
penalty = and_(*p)
x = [[1,2],[-2,-1],[5,-5]]
for xi in x:
assert p1(xi) + p2(xi) == penalty(xi)
penalty = or_(*p)
for xi in x:
assert min(p1(xi),p2(xi)) == penalty(xi)
penalty = not_(p1)
for xi in x:
assert bool(p1(xi)) != bool(penalty(xi))
penalty = not_(p2)
for xi in x:
assert bool(p2(xi)) != bool(penalty(xi))
Example:
- Minimize Rosenbrock's Function with Powell's method.
Demonstrates:
- standard models
- minimal solver interface
- parameter constraints solver and constraints factory decorator
- statistical parameter constraints
- customized monitors
# Powell's Directonal solver
from mystic.solvers import fmin_powell
# Rosenbrock function
from mystic.models import rosen
# tools
from mystic.monitors import VerboseMonitor
from mystic.math.measures import mean, impose_mean
if __name__ == '__main__':
print("Powell's Method")
print("===============")
# initial guess
x0 = [0.8,1.2,0.7]
# use the mean constraints factory decorator
from mystic.constraints import with_mean
# define constraints function
@with_mean(1.0)
def constraints(x):
# constrain the last x_i to be the same value as the first x_i
x[-1] = x[0]
return x
# configure monitor
stepmon = VerboseMonitor(1)
# use Powell's method to minimize the Rosenbrock function
solution = fmin_powell(rosen, x0, constraints=constraints, itermon=stepmon)
print(solution)
%%file spring.py
"a Tension-Compression String"
def objective(x):
x0,x1,x2 = x
return x0**2 * x1 * (x2 + 2)
bounds = [(0,100)]*3
# with penalty='penalty' applied, solution is:
xs = [0.05168906, 0.35671773, 11.28896619]
ys = 0.01266523
from mystic.symbolic import generate_constraint, generate_solvers, solve
from mystic.symbolic import generate_penalty, generate_conditions
equations =
1.0 - (x1**3 * x2)/(71785*x0**4) <= 0.0
(4*x1**2 - x0*x1)/(12566*x0**3 * (x1 - x0)) + 1./(5108*x0**2) - 1.0 <= 0.0
1.0 - 140.45*x0/(x2 * x1**2) <= 0.0
(x0 + x1)/1.5 - 1.0 <= 0.0
pf = generate_penalty(generate_conditions(equations), k=1e12)
if __name__ == '__main__':
from mystic.solvers import diffev2
result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, npop=40,
gtol=500, disp=True, full_output=True)
print(result[0])
equations =
1.0 - (x1**3 * x2)/(71785*x0**4) <= 0.0
(4*x1**2 - x0*x1)/(12566*x0**3 * (x1 - x0)) + 1./(5108*x0**2) - 1.0 <= 0.0
1.0 - 140.45*x0/(x2 * x1**2) <= 0.0
(x0 + x1)/1.5 - 1.0 <= 0.0
from mystic.symbolic import generate_constraint, generate_solvers, solve
from mystic.symbolic import generate_penalty, generate_conditions
ineql, eql = generate_conditions(equations)
print("CONVERTED SYMBOLIC TO SINGLE CONSTRAINTS FUNCTIONS")
print(ineql)
print(eql)
print("\nTHE INDIVIDUAL INEQUALITIES")
for f in ineql:
print(f.__doc__)
print("\nGENERATED THE PENALTY FUNCTION FOR ALL CONSTRAINTS")
pf = generate_penalty((ineql, eql))
print(pf.__doc__)
x = [-0.1, 0.5, 11.0]
print("\nPENALTY FOR {}: {}".format(x, pf(x)))
equations =
1.0 - (x1**3 * x2)/(71785*x0**4) <= 0.0
(4*x1**2 - x0*x1)/(12566*x0**3 * (x1 - x0)) + 1./(5108*x0**2) - 1.0 <= 0.0
1.0 - 140.45*x0/(x2 * x1**2) <= 0.0
(x0 + x1)/1.5 - 1.0 <= 0.0
"a Tension-Compression String"
from spring import objective, bounds, xs, ys
from mystic.penalty import quadratic_inequality
def penalty1(x): # <= 0.0
return 1.0 - (x[1]**3 * x[2])/(71785*x[0]**4)
def penalty2(x): # <= 0.0
return (4*x[1]**2 - x[0]*x[1])/(12566*x[0]**3 * (x[1] - x[0])) + 1./(5108*x[0]**2) - 1.0
def penalty3(x): # <= 0.0
return 1.0 - 140.45*x[0]/(x[2] * x[1]**2)
def penalty4(x): # <= 0.0
return (x[0] + x[1])/1.5 - 1.0
@quadratic_inequality(penalty1, k=1e12)
@quadratic_inequality(penalty2, k=1e12)
@quadratic_inequality(penalty3, k=1e12)
@quadratic_inequality(penalty4, k=1e12)
def penalty(x):
return 0.0
if __name__ == '__main__':
from mystic.solvers import diffev2
result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty, npop=40,
gtol=500, disp=True, full_output=True)
print(result[0])
Crypto problem in Google CP Solver.
Prolog benchmark problem
'''
Name : crypto.pl
Original Source: P. Van Hentenryck's book
Adapted by : Daniel Diaz - INRIA France
Date : September 1992
'''
def objective(x):
return 0.0
nletters = 26
bounds = [(1,nletters)]*nletters
# with penalty='penalty' applied, solution is:
# A B C D E F G H I J K L M N O P Q
xs = [ 5, 13, 9, 16, 20, 4, 24, 21, 25, 17, 23, 2, 8, 12, 10, 19, 7, \
# R S T U V W X Y Z
11, 15, 3, 1, 26, 6, 22, 14, 18]
ys = 0.0
# constraints
equations =
B + A + L + L + E + T - 45 == 0
C + E + L + L + O - 43 == 0
C + O + N + C + E + R + T - 74 == 0
F + L + U + T + E - 30 == 0
F + U + G + U + E - 50 == 0
G + L + E + E - 66 == 0
J + A + Z + Z - 58 == 0
L + Y + R + E - 47 == 0
O + B + O + E - 53 == 0
O + P + E + R + A - 65 == 0
P + O + L + K + A - 59 == 0
Q + U + A + R + T + E + T - 50 == 0
S + A + X + O + P + H + O + N + E - 134 == 0
S + C + A + L + E - 51 == 0
S + O + L + O - 37 == 0
S + O + N + G - 61 == 0
S + O + P + R + A + N + O - 82 == 0
T + H + E + M + E - 72 == 0
V + I + O + L + I + N - 100 == 0
W + A + L + T + Z - 34 == 0
var = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
# Let's say we know the vowels.
bounds[0] = (5,5) # A
bounds[4] = (20,20) # E
bounds[8] = (25,25) # I
bounds[14] = (10,10) # O
bounds[20] = (1,1) # U
from mystic.constraints import unique, near_integers, has_unique
from mystic.symbolic import generate_penalty, generate_conditions
pf = generate_penalty(generate_conditions(equations,var),k=1)
from mystic.penalty import quadratic_equality
@quadratic_equality(near_integers)
@quadratic_equality(has_unique)
def penalty(x):
return pf(x)
from numpy import round, hstack, clip
def constraint(x):
x = round(x).astype(int) # force round and convert type to int
x = clip(x, 1,nletters) #XXX: hack to impose bounds
x = unique(x, range(1,nletters+1))
return x
if __name__ == '__main__':
from mystic.solvers import diffev2
from mystic.monitors import Monitor, VerboseMonitor
mon = VerboseMonitor(50)
result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf,
constraints=constraint, npop=52, ftol=1e-8, gtol=1000,
disp=True, full_output=True, cross=0.1, scale=0.9, itermon=mon)
print(result[0])
Eq 10 in Google CP Solver.
Standard benchmark problem.
def objective(x):
return 0.0
bounds = [(0,10)]*7
# with penalty='penalty' applied, solution is:
xs = [6., 0., 8., 4., 9., 3., 9.]
ys = 0.0
# constraints
equations =
98527*x0 + 34588*x1 + 5872*x2 + 59422*x4 + 65159*x6 - 1547604 - 30704*x3 - 29649*x5 == 0.0
98957*x1 + 83634*x2 + 69966*x3 + 62038*x4 + 37164*x5 + 85413*x6 - 1823553 - 93989*x0 == 0.0
900032 + 10949*x0 + 77761*x1 + 67052*x4 - 80197*x2 - 61944*x3 - 92964*x5 - 44550*x6 == 0.0
73947*x0 + 84391*x2 + 81310*x4 - 1164380 - 96253*x1 - 44247*x3 - 70582*x5 - 33054*x6 == 0.0
13057*x2 + 42253*x3 + 77527*x4 + 96552*x6 - 1185471 - 60152*x0 - 21103*x1 - 97932*x5 == 0.0
1394152 + 66920*x0 + 55679*x3 - 64234*x1 - 65337*x2 - 45581*x4 - 67707*x5 - 98038*x6 == 0.0
68550*x0 + 27886*x1 + 31716*x2 + 73597*x3 + 38835*x6 - 279091 - 88963*x4 - 76391*x5 == 0.0
76132*x1 + 71860*x2 + 22770*x3 + 68211*x4 + 78587*x5 - 480923 - 48224*x0 - 82817*x6 == 0.0
519878 + 94198*x1 + 87234*x2 + 37498*x3 - 71583*x0 - 25728*x4 - 25495*x5 - 70023*x6 == 0.0
361921 + 78693*x0 + 38592*x4 + 38478*x5 - 94129*x1 - 43188*x2 - 82528*x3 - 69025*x6 == 0.0
from mystic.symbolic import generate_constraint, generate_solvers, solve
cf = generate_constraint(generate_solvers(solve(equations)))
if __name__ == '__main__':
from mystic.solvers import diffev2
result = diffev2(objective, x0=bounds, bounds=bounds, constraints=cf,
npop=4, gtol=1, disp=True, full_output=True)
print(result[0])
"Pressure Vessel Design"
def objective(x):
x0,x1,x2,x3 = x
return 0.6224*x0*x2*x3 + 1.7781*x1*x2**2 + 3.1661*x0**2*x3 + 19.84*x0**2*x2
bounds = [(0,1e6)]*4
# with penalty='penalty' applied, solution is:
xs = [0.72759093, 0.35964857, 37.69901188, 240.0]
ys = 5804.3762083
from mystic.symbolic import generate_constraint, generate_solvers, solve
from mystic.symbolic import generate_penalty, generate_conditions
equations =
-x0 + 0.0193*x2 <= 0.0
-x1 + 0.00954*x2 <= 0.0
-pi*x2**2*x3 - (4/3.)*pi*x2**3 + 1296000.0 <= 0.0
x3 - 240.0 <= 0.0
pf = generate_penalty(generate_conditions(equations), k=1e12)
if __name__ == '__main__':
from mystic.solvers import diffev2
from mystic.math import almostEqual
result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, npop=40, gtol=500,
disp=True, full_output=True)
print(result[0])
Minimize: f = 2*x[0] + 1*x[1]
Subject to: -1*x[0] + 1*x[1] <= 1
1*x[0] + 1*x[1] >= 2
1*x[1] >= 0
1*x[0] - 2*x[1] <= 4
where: -inf <= x[0] <= inf
def objective(x):
x0,x1 = x
return 2*x0 + x1
equations =
-x0 + x1 - 1.0 <= 0.0
-x0 - x1 + 2.0 <= 0.0
x0 - 2*x1 - 4.0 <= 0.0
bounds = [(None, None),(0.0, None)]
# with penalty='penalty' applied, solution is:
xs = [0.5, 1.5]
ys = 2.5
from mystic.symbolic import generate_conditions, generate_penalty
pf = generate_penalty(generate_conditions(equations), k=1e3)
from mystic.symbolic import generate_constraint, generate_solvers, simplify
cf = generate_constraint(generate_solvers(simplify(equations)))
if __name__ == '__main__':
from mystic.solvers import fmin_powell
from mystic.math import almostEqual
result = fmin_powell(objective, x0=[0.0,0.0], bounds=bounds, constraint=cf,
penalty=pf, disp=True, full_output=True, gtol=3)
print(result[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: mystic
Step4: Diagnostic tools
Step6: NOTE IPython does not handle shell prompt interactive programs well, so the above should be run from a command prompt. An IPython-safe version is below.
Step8: Monitors
Step9: Solution trajectory and model plotting
Step11: Solver "tuning" and extension
Step12: Algorithm configurability
Step13: Solver population
Step14: EXERCISE
Step16: In addition to being able to generically apply information as a penalty, mystic provides the ability to construct constraints "operators" -- essentially applying kernel transformations that reduce optimizer search space to the space of solutions that satisfy the constraints. This can greatly accelerate convergence to a solution, as the space that the optimizer can explore is restricted.
Step19: Range (i.e. 'box') constraints
Step21: Penatly functions
Step24: "Operators" that directly constrain search space
Step27: Special cases
Step29: EXERCISE
Step32: Linear and quadratic constraints
|
7,899
|
<ASSISTANT_TASK:>
Python Code:
# In the tf.keras.layers package, layers are objects. To construct a layer,
# simply construct the object. Most layers take as a first argument the number
# of output dimensions / channels.
layer = tf.keras.layers.Dense(100)
# The number of input dimensions is often unnecessary, as it can be inferred
# the first time the layer is used, but it can be provided if you want to
# specify it manually, which is useful in some complex models.
layer = tf.keras.layers.Dense(10, input_shape=(None, 5))
# To use a layer, simply call it.
layer(tf.zeros([10, 5]))
# Layers have many useful methods. For example, you can inspect all variables
# in a layer by calling layer.variables. In this case a fully-connected layer
# will have variables for weights and biases.
layer.variables
# The variables are also accessible through nice accessors
layer.kernel, layer.bias
class MyDenseLayer(tf.keras.layers.Layer):
def __init__(self, num_outputs):
super(MyDenseLayer, self).__init__()
self.num_outputs = num_outputs
def build(self, input_shape):
self.kernel = self.add_variable("kernel",
shape=[input_shape[-1].value,
self.num_outputs])
def call(self, input):
return tf.matmul(input, self.kernel)
layer = MyDenseLayer(10)
print(layer(tf.zeros([10, 5])))
print(layer.variables)
class ResnetIdentityBlock(tf.keras.Model):
def __init__(self, kernel_size, filters):
super(ResnetIdentityBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
self.bn2a = tf.keras.layers.BatchNormalization()
self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
self.bn2b = tf.keras.layers.BatchNormalization()
self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))
self.bn2c = tf.keras.layers.BatchNormalization()
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
x += input_tensor
return tf.nn.relu(x)
block = ResnetIdentityBlock(1, [1, 2, 3])
print(block(tf.zeros([1, 2, 3, 3])))
print([x.name for x in block.variables])
my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1)),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(2, 1,
padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(3, (1, 1)),
tf.keras.layers.BatchNormalization()])
my_seq(tf.zeros([1, 2, 3, 3]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The full list of pre-existing layers can be seen in the documentation. It includes Dense (a fully-connected layer),
Step2: Implementing custom layers
Step3: Note that you don't have to wait until build is called to create your variables, you can also create them in __init__.
Step4: Much of the time, however, models which compose many layers simply call one layer after the other. This can be done in very little code using tf.keras.Sequential
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.