| Unnamed: 0 (int64, 0-15.9k) | cleaned_code (stringlengths 67-124k, ⌀ allowed) | cleaned_prompt (stringlengths 168-30.3k, ⌀ allowed) |
|---|---|---|
6,800
|
<ASSISTANT_TASK:>
Python Code:
from pylab import *
from copy import deepcopy
from matplotlib import animation, rc
from IPython.display import HTML
%matplotlib inline
rc('text', usetex=True)
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 15}
matplotlib.rc('font', **font)
E1, E2, E3 = 0., 20., 0.
V12, V23 = 1., 1.
psi0 = array([1, 0, 0], dtype='complex')
Nt = int(1e4)
psi = zeros((Nt, 3), dtype='complex')
psi[0, :] = psi0
for E2, tf in zip(arange(4) * 20, [20, 200, 200, 200]):
times = linspace(0, tf, Nt)
H = array([[E1, V12, 0],
[V12, E2, V23],
[0, V23, E3]])
lambd, Q = eigh(H)
Q_inv = Q.T.conj()
for it in range(1, Nt):
psi[it, :] = Q_inv @ psi0
psi[it, :] = diag(np.exp(-1j * lambd * times[it])) @ psi[it, :]
psi[it, :] = Q @ psi[it, :]
plot(times, abs(psi) ** 2)
ylabel(r'$\|\Psi(t)\|^2$')
xlabel(r'$t$')
legend(['$\|\Psi(t)_1\|^2$', '$\|\Psi(t)_2\|^2$', '$\|\Psi(t)_3\|^2$'], loc=1)
figure()
y = cos(V12 ** 2 / E2 * times) ** 2
plot(times, y)
y = sin(V12 ** 2 / E2 * times) ** 2
plot(times, y)
plot(times, abs(psi[:, 0]) ** 2, label='$\|\Psi(t)_1\|^2$')
plot(times, abs(psi[:, 2]) ** 2, label='$\|\Psi(t)_3\|^2$')
ylabel(r'$\|\Psi(t)\|^2$')
xlabel(r'$t$')
legend(loc=1)
figure()
plot(times, abs(psi[:, 0]) ** 2, label='$\|\Psi(t)_1\|^2$')
y = cos(V12 ** 2 / E2 * times) ** 2
plot(times, y, label=r'$\cos^2(V_{12}^2 \, t / E_2)$')
ylabel(r'$\|\Psi(t)\|^2$')
xlabel(r'$t$')
legend(loc=1)
ylim([.99, 1.01])
xlim([-.3, 3]);
def V(x, Z=1):
return -Z / sqrt(2 / Z ** 2 + x ** 2)
N = 2 ** 10
x0, x1 = -25, 25
x = linspace(x0, x1, N)
dx = (x1 - x0) / (N - 1)
H = diag(ones(N - 1), -1) - 2 * diag(ones(N)) + diag(ones(N - 1), 1)
H *= -1 / (2 * dx**2)
H += diag(V(x))
E, Psi_tot = eigh(H)
E_bound=E[E<0]
for k, E_ in enumerate(sorted(E_bound)[:3]):
print('E_{' + str(k) + '} = ' + "{:1.4f}".format(E_))
plot(x, Psi_tot[:, 0] / sqrt(dx), label=r'$\Psi_0(x)$')
plot(x, Psi_tot[:, 1] / sqrt(dx), label=r'$\Psi_1(x)$')
plot(x, Psi_tot[:, 2] / sqrt(dx), label=r'$\Psi_2(x)$')
legend(loc=1)
xlabel('x')
ylabel('$\Psi(x)$')
figure()
plot(x, V(x))
plot(x, E_bound * ones_like(x)[:, newaxis])
legend([r'$V(x)$', r'$E_0$', r'$E_1$', r'$E_2$'])
xlabel('x')
ylabel('Energy')
def E(t, E0, omega, n):
t_ = maximum(omega * t, 0)
t_ = minimum(t_, 2 * np.pi * n)
return E0 * sin(t_) * sin(t_ / (2 * n))
def A(t, E0, omega, n):
pref = -E0 / omega
t_ = maximum(omega * t, 0.)
t_ = minimum(t_, 2 * np.pi * n)
return pref * (cos(t_) * (n * n * cos(t_ / n) - n * n + 1) + n * sin(t_) *
sin(t_ / n) - 1) / (2 * (n * n - 1))
def vanish(V0, x, x0, x1):
V0 *= 2
xs, xe = x[0], x[-1]
potential = np.maximum(0, (V0 * (x - x0) / (xs - x0)))
return np.maximum(potential, (V0 * (x - x1) / (xe - x1)))
omega = .02
n = 3 / 2
E0 = .05
Z = 1
x0, x1 = -15, 15
dx = .1
x = arange(x0, x1, dx)
N = len(x)
p = fftfreq(N, d=dx / (2 * pi))
dt = 0.5
ts = np.arange(- pi / omega, 2 * np.pi * (n + .5) / omega, dt)
plot(ts, E(ts, E0, omega, n))
title('Electric field for n = 3/2')
xlabel('t')
ylabel('E(t)')
figure()
t_star = np.arange(-pi / omega, 2 * np.pi * (5 + .5) / omega, 0.01)
plot(t_star, E(t_star, E0, omega, 5))
xlabel('t')
ylabel('E(t)')
title('Electric field for n = 5')
figure()
plot(ts, A(ts, E0, omega, n))
xlabel('t')
ylabel('A(t)')
title('Magnetic potential field for n = 3/2')
figure()
plot(t_star, A(t_star, E0, omega, 5))
xlabel('t')
ylabel('A(t)')
title('Magnetic potential field for n = 5')
omega = .02
n = 3 / 2
E0 = .05
Z = 1
x0, x1 = -15, 15
xl, xr = -10, 10
d = x1 - xr
t_temp = np.linspace(0, 2 * np.pi * (n + .5) / omega, 1000)
A_max = max(A(t_temp, E0, omega, n)) # the maximum momentum is equal to the
# maximum value of the magnetic potential
p_tilde = n**2 * E0 /(n**2 - 1) / omega
print('dx using the approximation ',\
"{:1.4f}".format(pi / p_tilde), 'a.u.')
print('dx using the maximum of the momentum calculated numerically',\
"{:1.4f}".format(pi / A_max), 'a.u.')
print('dt using the approximation ',\
"{:1.4f}".format(2 * pi / p_tilde ** 2), 'a.u.')
print('dt using the maximum of the momentum calculated numerically',\
"{:1.4f}".format(2 * pi / A_max ** 2), 'a.u.')
print("{:1.4f}".format(p_tilde / (8 * d)), 'a.u. < tilde_V <',\
"{:1.4f}".format(p_tilde ** 3 / 2 ** 4), 'a.u.')
V_tilde = 5.
dx = pi / p_tilde
x = arange(x0, x1, dx)
N = len(x)
p = fftfreq(N, d=dx / (2 * pi))
dt = 2 * pi / p_tilde ** 2
ts = np.arange(0, 2 * np.pi * (n + .5) / omega, dt)
H = diag(ones(N - 1), -1) - 2 * diag(ones(N)) + diag(ones(N - 1), 1)
H *= -1 / (2 * dx ** 2)
H += diag(V(x, Z))
U_2 = exp(-1j * 0.5 * p ** 2 * dt)
_, Psi_tot = eigh(H)
Psi = Psi_tot[:, 0].astype('complex')
Psi /= np.sqrt(sum(abs(Psi) ** 2 * dx))
psi0 = deepcopy(Psi)
norm = np.zeros(len(ts))
overlap = np.zeros(len(ts))
for k, t in enumerate(ts):
U_1 = exp(-0.5 * 1j * (V(x, 1) - 1j *
vanish(V_tilde, x, xl, xr) - x * E(t, E0, omega, n)) * dt)
Psi *= U_1
Psi = fft(Psi)
Psi *= U_2
Psi = ifft(Psi) # go to real space
Psi *= U_1
norm[k] = sum(abs(Psi) ** 2 * dx)
overlap[k] = abs(vdot(Psi, psi0)) * dx
N_e = 20
ionizs = np.zeros(N_e)
norms = np.zeros((N_e, len(ts)))
for j, E0 in enumerate(np.linspace(0, .05, N_e)):
Psi = deepcopy(psi0)
for k, t in enumerate(ts):
U_1 = exp(-0.5 * 1j * (V(x, 1) - 1j * vanish(V_tilde, x, -10, 10) - x * E(t, E0, omega, n)) * dt)
Psi *= U_1
Psi = fft(Psi)
Psi *= U_2
Psi = ifft(Psi) # go to real space
Psi *= U_1
norms[j, k] = sum(abs(Psi) ** 2 * dx)
ionizs[j] = 1 - sum(abs(Psi) ** 2 * dx)
title('Ionization probabilities in time')
plot(ts, 1 - norms.T[:, ::-1])
legend([r'$E_0 = 0.05$ a.u.'])
xlabel(r't')
ylabel(r'$1 - |<\Psi(t)|\Psi(t)>|^2$')
figure()
title(r'Ionization probabilities at $t_{end}$')
ylabel(r'$1 - |<\Psi(t_{end})|\Psi(t_{end})>|^2$')
xlabel(r'$E_0$')
plot(np.linspace(0, .05, N_e), ionizs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Superexchange in a three-level system.
Step2: (b)
Step3: 2. The one-dimensional soft-core potential.
Step4: 3. Ionization from a one-dimensional soft-core potential.
Step5: (b)
Step6: (f)
Step7: We see, as expected, that the lower the amplitude of the electric field, the lower the final ionization probability.
|
6,801
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/google/starthinker
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
FIELDS = {
'auth_read':'user', # Credentials used for reading data.
'sheets_sheet':'',
'sheets_tab':'',
'sheets_range':'',
}
print("Parameters Set To: %s" % FIELDS)
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
TASKS = [
{
'sheets':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
'sheet':{'field':{'name':'sheets_sheet','kind':'string','order':1,'default':''}},
'tab':{'field':{'name':'sheets_tab','kind':'string','order':2,'default':''}},
'range':{'field':{'name':'sheets_range','kind':'string','order':3,'default':''}},
'clear':True
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Set Configuration
Step2: 3. Enter Sheet Clear Recipe Parameters
Step3: 4. Execute Sheet Clear
|
6,802
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.__version__
learning_rate = 0.01
momentum = 0.8
filenames = ["data/life_satisfaction.csv"]
n_epochs = 500
batch_size = 5
graph = tf.Graph()
with graph.as_default():
reader = tf.TextLineReader(skip_header_lines=1)
filename_queue = tf.train.string_input_producer(filenames, num_epochs=n_epochs)
record_id, record = reader.read(filename_queue)
record_defaults = [[''], [0.0], [0.0]]
country, gdp_per_capita, life_satisfaction = tf.decode_csv(record, record_defaults=record_defaults)
X_batch, y_batch = tf.train.batch([gdp_per_capita, life_satisfaction], batch_size=batch_size)
X_batch_reshaped = tf.reshape(X_batch, [-1, 1])
y_batch_reshaped = tf.reshape(y_batch, [-1, 1])
X = tf.placeholder_with_default(X_batch_reshaped, shape=[None, 1], name="X")
y = tf.placeholder_with_default(y_batch_reshaped, shape=[None, 1], name="y")
b = tf.Variable(0.0, name="b")
w = tf.Variable(tf.zeros([1, 1]), name="w")
y_pred = tf.add(tf.matmul(X / 10000, w), b, name="y_pred") # X @ w + b
mse = tf.reduce_mean(tf.square(y_pred - y), name="mse")
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
training_op = optimizer.minimize(mse, global_step=global_step)
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver()
from datetime import datetime
def logdir():
root_logdir = "tf_logs"
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
return "{}/run_{}/".format(root_logdir, now)
logdir()
summary_writer = tf.summary.FileWriter("tf_logs/run_number_1_solution/", graph=graph)
filenames = ["data/life_satisfaction.csv"]
n_epochs = 500
batch_size = 5
graph = tf.Graph()
with graph.as_default():
with tf.name_scope("reader"):
reader = tf.TextLineReader(skip_header_lines=1)
filename_queue = tf.train.string_input_producer(filenames, num_epochs=n_epochs)
record_id, record = reader.read(filename_queue)
record_defaults = [[''], [0.0], [0.0]]
country, gdp_per_capita, life_satisfaction = tf.decode_csv(record, record_defaults=record_defaults)
X_batch, y_batch = tf.train.batch([gdp_per_capita, life_satisfaction], batch_size=batch_size)
X_batch_reshaped = tf.reshape(X_batch, [-1, 1])
y_batch_reshaped = tf.reshape(y_batch, [-1, 1])
with tf.name_scope("linear_model"):
X = tf.placeholder_with_default(X_batch_reshaped, shape=[None, 1], name="X")
y = tf.placeholder_with_default(y_batch_reshaped, shape=[None, 1], name="y")
b = tf.Variable(0.0, name="b")
w = tf.Variable(tf.zeros([1, 1]), name="w")
y_pred = tf.add(tf.matmul(X / 10000, w), b, name="y_pred") # X @ w + b
with tf.name_scope("train"):
mse = tf.reduce_mean(tf.square(y_pred - y), name="mse")
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
training_op = optimizer.minimize(mse, global_step=global_step)
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter("tf_logs/run_number_2_solution/", graph=graph)
country.name, gdp_per_capita.name, X_batch.name, y_batch.name
X.name, y.name, b.name, w.name, y_pred.name
mse.name, global_step.name, training_op.name
graph1 = tf.Graph()
with graph1.as_default():
a = tf.constant(1.0)
summary_writer = tf.summary.FileWriter(logdir(), graph=graph)
graph2 = tf.Graph()
with graph2.as_default():
a = tf.constant(1.0, name="a")
b = tf.Variable(2.0, name="b")
c = a * b
import time
time.sleep(1.1)
summary_writer = tf.summary.FileWriter(logdir(), graph=graph)
time.sleep(1.1)
filenames = ["data/life_satisfaction.csv"]
n_epochs = 500
batch_size = 5
graph = tf.Graph()
with graph.as_default():
with tf.name_scope("reader"):
reader = tf.TextLineReader(skip_header_lines=1)
filename_queue = tf.train.string_input_producer(filenames, num_epochs=n_epochs)
record_id, record = reader.read(filename_queue)
record_defaults = [[''], [0.0], [0.0]]
country, gdp_per_capita, life_satisfaction = tf.decode_csv(record, record_defaults=record_defaults)
X_batch, y_batch = tf.train.batch([gdp_per_capita, life_satisfaction], batch_size=batch_size)
X_batch_reshaped = tf.reshape(X_batch, [-1, 1])
y_batch_reshaped = tf.reshape(y_batch, [-1, 1])
with tf.name_scope("linear_model"):
X = tf.placeholder_with_default(X_batch_reshaped, shape=[None, 1], name="X")
y = tf.placeholder_with_default(y_batch_reshaped, shape=[None, 1], name="y")
b = tf.Variable(0.0, name="b")
w = tf.Variable(tf.zeros([1, 1]), name="w")
y_pred = tf.add(tf.matmul(X / 10000, w), b, name="y_pred")
with tf.name_scope("train"):
mse = tf.reduce_mean(tf.square(y_pred - y), name="mse")
mse_summary = tf.summary.scalar('MSE', mse) # <= ADDED
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
training_op = optimizer.minimize(mse, global_step=global_step)
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(logdir(), graph)
with tf.Session(graph=graph) as sess:
init.run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
try:
while not coord.should_stop():
_, mse_summary_val, global_step_val = sess.run([training_op, mse_summary, global_step])
if global_step_val % 10 == 0:
summary_writer.add_summary(mse_summary_val, global_step_val)
except tf.errors.OutOfRangeError:
print("End of training")
coord.request_stop()
coord.join(threads)
saver.save(sess, "./my_life_satisfaction_model")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From previous notebooks
Step2: TensorBoard
Step3: 6.4) Print out the names of a few operations. Notice how the names now have the scope as a prefix.
Step4: Create a few different graphs and instantiate a different FileWriter for each one, using a different log directory every time (with the help of the logdir() function). Refresh TensorBoard and notice that you can browse any graph you want by selecting the appropriate run.
Step5: 6.3)
Step6: 6.4)
Step7: 6.5)
Step8: If we run logdir() twice within the same second, we will get the same directory name twice. To avoid this, let's wait a bit over 1 second here. In real life, this is quite unlikely to happen, since training a model typically takes much longer than 1 second. (An alternative that avoids the collision altogether is sketched after this list.)
Step9: 6.6)
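As an alternative to sleeping, a hypothetical variant of logdir() (an illustration only, not part of the original notebook) can append microseconds to the timestamp, so two calls within the same second still return distinct run directories:
from datetime import datetime
def logdir_unique(root_logdir="tf_logs"):
    # %f appends microseconds, so back-to-back calls still get unique names
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S%f")
    return "{}/run_{}/".format(root_logdir, now)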
|
6,803
|
<ASSISTANT_TASK:>
Python Code:
max_k =50
accuracies = np.zeros(max_k, dtype='float64')
cv = model_selection.StratifiedKFold(n_splits=5)
for k in range(max_k + 1)[1:]:
estimator = neighbors.KNeighborsClassifier(n_neighbors=k)
accuracies[k - 1] = model_selection.cross_val_score(estimator,
x, y, cv=cv).mean()
plt.figure(figsize=(14, 8))
plt.title('Accuracy(k), Stratified')
plt.xlabel('k')
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(range(max_k + 1)[1:], accuracies)
plt.show()
print 'for 4 classes best k is', accuracies.argmax() + 1
max_k =50
accuracies = np.zeros(max_k, dtype='float64')
cv = model_selection.KFold(n_splits=5)
for k in range(max_k + 1)[1:]:
estimator = neighbors.KNeighborsClassifier(n_neighbors=k)
accuracies[k - 1] = model_selection.cross_val_score(estimator,
x, y, cv=cv).mean()
plt.figure(figsize=(14, 8))
plt.title('Accuracy(k)')
plt.xlabel('k')
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(range(max_k + 1)[1:], accuracies)
plt.show()
print 'for 4 classes best k is', accuracies.argmax() + 1
(x, y) = datasets.make_classification(n_samples=500, n_features=2,
n_informative=2, n_classes=3,
n_redundant=0,
n_clusters_per_class=1,
random_state=42)
max_k =50
accuracies = np.zeros(max_k, dtype='float64')
cv = model_selection.StratifiedKFold(n_splits=5)
for k in range(max_k + 1)[1:]:
estimator = neighbors.KNeighborsClassifier(n_neighbors=k)
accuracies[k - 1] = model_selection.cross_val_score(estimator,
x, y, cv=cv).mean()
plt.figure(figsize=(14, 8))
plt.title('Accuracy(k)')
plt.xlabel('k')
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(range(max_k + 1)[1:], accuracies)
plt.show()
print 'for 3 classes best k is', accuracies.argmax() + 1
(x, y) = datasets.make_classification(n_samples=500, n_features=2,
n_informative=2, n_classes=2,
n_redundant=0,
n_clusters_per_class=1,
random_state=42)
max_k =50
accuracies = np.zeros(max_k, dtype='float64')
cv = model_selection.StratifiedKFold(n_splits=5)
for k in range(max_k + 1)[1:]:
estimator = neighbors.KNeighborsClassifier(n_neighbors=k)
accuracies[k - 1] = model_selection.cross_val_score(estimator,
x, y, cv=cv).mean()
plt.figure(figsize=(14, 8))
plt.title('Accuracy(k)')
plt.xlabel('k')
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(range(max_k + 1)[1:], accuracies)
plt.show()
print 'for 2 classes best k is', accuracies.argmax() + 1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Посмотрим на точность в зависимости от $k$ при кросс-валидации на 5 обычных фолдов.
Step2: Посмотрим, что будет на 3 классах.
Step3: Посмотрим, что будет на 2 классах
|
6,804
|
<ASSISTANT_TASK:>
Python Code:
# ph_sel_name = "all-ph"
# data_id = "7d"
from fretbursts import *
init_notebook()
from IPython.display import display
data_dir = './data/singlespot/'
import os
data_dir = os.path.abspath(data_dir) + '/'
assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
from glob import glob
file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)
file_list
## Selection for POLIMI 2012-12-6 dataset
# file_list.pop(2)
# file_list = file_list[1:-2]
# display(file_list)
# labels = ['22d', '27d', '17d', '12d', '7d']
## Selection for P.E. 2012-12-6 dataset
# file_list.pop(1)
# file_list = file_list[:-1]
# display(file_list)
# labels = ['22d', '27d', '17d', '12d', '7d']
## Selection for POLIMI 2012-11-26 datatset
labels = ['17d', '27d', '7d', '12d', '22d']
files_dict = {lab: fname for lab, fname in zip(labels, file_list)}
files_dict
ph_sel_map = {'all-ph': Ph_sel('all'), 'AexAem': Ph_sel(Aex='Aem')}
ph_sel = ph_sel_map[ph_sel_name]
data_id, ph_sel_name
d = loader.photon_hdf5(filename=files_dict[data_id])
d.ph_times_t, d.det_t
d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)
plot_alternation_hist(d)
loader.alex_apply_period(d)
d
d.time_max
d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)
dplot(d, timetrace_bg)
d.rate_m, d.rate_dd, d.rate_ad, d.rate_aa
from mpl_toolkits.axes_grid1 import AxesGrid
import lmfit
print('lmfit version:', lmfit.__version__)
assert d.dir_ex == 0
assert d.leakage == 0
d.burst_search(m=10, F=6, ph_sel=ph_sel)
print(d.ph_sel, d.num_bursts)
ds_sa = d.select_bursts(select_bursts.naa, th1=30)
ds_sa.num_bursts
mask = (d.naa[0] - np.abs(d.na[0] + d.nd[0])) > 30
ds_saw = d.select_bursts_mask_apply([mask])
ds_sas0 = ds_sa.select_bursts(select_bursts.S, S2=0.10)
ds_sas = ds_sa.select_bursts(select_bursts.S, S2=0.15)
ds_sas2 = ds_sa.select_bursts(select_bursts.S, S2=0.20)
ds_sas3 = ds_sa.select_bursts(select_bursts.S, S2=0.25)
ds_st = d.select_bursts(select_bursts.size, add_naa=True, th1=30)
ds_sas.num_bursts
dx = ds_sas0
size = dx.na[0] + dx.nd[0]
s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)
s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])
plot(s_ax, s_hist, '-o', alpha=0.5)
dx = ds_sas
size = dx.na[0] + dx.nd[0]
s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)
s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])
plot(s_ax, s_hist, '-o', alpha=0.5)
dx = ds_sas2
size = dx.na[0] + dx.nd[0]
s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)
s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])
plot(s_ax, s_hist, '-o', alpha=0.5)
dx = ds_sas3
size = dx.na[0] + dx.nd[0]
s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)
s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])
plot(s_ax, s_hist, '-o', alpha=0.5)
plt.title('(nd + na) for A-only population using different S cutoff');
dx = ds_sa
alex_jointplot(dx);
dplot(ds_sa, hist_S)
dx = ds_sa
bin_width = 0.03
bandwidth = 0.03
bins = np.r_[-0.2 : 1 : bin_width]
x_kde = np.arange(bins.min(), bins.max(), 0.0002)
## Weights
weights = None
## Histogram fit
fitter_g = mfit.MultiFitter(dx.S)
fitter_g.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_g.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))
S_hist_orig = fitter_g.hist_pdf
S_2peaks = fitter_g.params.loc[0, 'p1_center']
dir_ex_S2p = S_2peaks/(1 - S_2peaks)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p)
## KDE
fitter_g.calc_kde(bandwidth=bandwidth)
fitter_g.find_kde_max(x_kde, xmin=0, xmax=0.15)
S_peak = fitter_g.kde_max_pos[0]
dir_ex_S_kde = S_peak/(1 - S_peak)
print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(fitter_g, ax=ax[0])
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks*100))
mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=True)
ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak*100));
## 2-Asym-Gaussian
fitter_ag = mfit.MultiFitter(dx.S)
fitter_ag.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_ag.fit_histogram(model = mfit.factory_two_asym_gaussians(p1_center=0.1, p2_center=0.4))
#print(fitter_ag.fit_obj[0].model.fit_report())
S_2peaks_a = fitter_ag.params.loc[0, 'p1_center']
dir_ex_S2pa = S_2peaks_a/(1 - S_2peaks_a)
print('Fitted direct excitation (na/naa) [2-Asym-Gauss]:', dir_ex_S2pa)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(fitter_g, ax=ax[0])
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks*100))
mfit.plot_mfit(fitter_ag, ax=ax[1])
ax[1].set_title('2-Asym-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_a*100));
dx = ds_sa.select_bursts(select_bursts.nd, th1=-100, th2=0)
fitter = bext.bursts_fitter(dx, 'S')
fitter.fit_histogram(model = mfit.factory_gaussian(center=0.1))
S_1peaks_th = fitter.params.loc[0, 'center']
dir_ex_S1p = S_1peaks_th/(1 - S_1peaks_th)
print('Fitted direct excitation (na/naa) [1-Gauss]:', dir_ex_S1p)
mfit.plot_mfit(fitter)
plt.xlim(-0.1, 0.6)
dx = ds_sa
## Weights
weights = 1 - mfit.gaussian(dx.S[0], fitter_g.params.loc[0, 'p2_center'], fitter_g.params.loc[0, 'p2_sigma'])
weights[dx.S[0] >= fitter_g.params.loc[0, 'p2_center']] = 0
## Histogram fit
fitter_w1 = mfit.MultiFitter(dx.S)
fitter_w1.weights = [weights]
fitter_w1.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_w1.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))
S_2peaks_w1 = fitter_w1.params.loc[0, 'p1_center']
dir_ex_S2p_w1 = S_2peaks_w1/(1 - S_2peaks_w1)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w1)
## KDE
fitter_w1.calc_kde(bandwidth=bandwidth)
fitter_w1.find_kde_max(x_kde, xmin=0, xmax=0.15)
S_peak_w1 = fitter_w1.kde_max_pos[0]
dir_ex_S_kde_w1 = S_peak_w1/(1 - S_peak_w1)
print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w1)
def plot_weights(x, weights, ax):
ax2 = ax.twinx()
x_sort = x.argsort()
ax2.plot(x[x_sort], weights[x_sort], color='k', lw=4, alpha=0.4)
ax2.set_ylabel('Weights');
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(fitter_w1, ax=ax[0])
mfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False)
plot_weights(dx.S[0], weights, ax=ax[0])
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w1*100))
mfit.plot_mfit(fitter_w1, ax=ax[1], plot_model=False, plot_kde=True)
mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False)
plot_weights(dx.S[0], weights, ax=ax[1])
ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w1*100));
## Weights
sizes = dx.nd[0] + dx.na[0] #- dir_ex_S_kde_w3*dx.naa[0]
weights = dx.naa[0] - abs(sizes)
weights[weights < 0] = 0
## Histogram
fitter_w4 = mfit.MultiFitter(dx.S)
fitter_w4.weights = [weights]
fitter_w4.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_w4.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))
S_2peaks_w4 = fitter_w4.params.loc[0, 'p1_center']
dir_ex_S2p_w4 = S_2peaks_w4/(1 - S_2peaks_w4)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w4)
## KDE
fitter_w4.calc_kde(bandwidth=bandwidth)
fitter_w4.find_kde_max(x_kde, xmin=0, xmax=0.15)
S_peak_w4 = fitter_w4.kde_max_pos[0]
dir_ex_S_kde_w4 = S_peak_w4/(1 - S_peak_w4)
print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w4)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(fitter_w4, ax=ax[0])
mfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False)
#plot_weights(dx.S[0], weights, ax=ax[0])
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w4*100))
mfit.plot_mfit(fitter_w4, ax=ax[1], plot_model=False, plot_kde=True)
mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False)
#plot_weights(dx.S[0], weights, ax=ax[1])
ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w4*100));
mask = (d.naa[0] - np.abs(d.na[0] + d.nd[0])) > 30
ds_saw = d.select_bursts_mask_apply([mask])
print(ds_saw.num_bursts)
dx = ds_saw
## Weights
weights = None
## 2-Gaussians
fitter_w5 = mfit.MultiFitter(dx.S)
fitter_w5.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_w5.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))
S_2peaks_w5 = fitter_w5.params.loc[0, 'p1_center']
dir_ex_S2p_w5 = S_2peaks_w5/(1 - S_2peaks_w5)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w5)
## KDE
fitter_w5.calc_kde(bandwidth=bandwidth)
fitter_w5.find_kde_max(x_kde, xmin=0, xmax=0.15)
S_peak_w5 = fitter_w5.kde_max_pos[0]
S_2peaks_w5_fiterr = fitter_w5.fit_res[0].params['p1_center'].stderr
dir_ex_S_kde_w5 = S_peak_w5/(1 - S_peak_w5)
print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w5)
## 2-Asym-Gaussians
fitter_w5a = mfit.MultiFitter(dx.S)
fitter_w5a.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_w5a.fit_histogram(model = mfit.factory_two_asym_gaussians(p1_center=0.05, p2_center=0.3))
S_2peaks_w5a = fitter_w5a.params.loc[0, 'p1_center']
dir_ex_S2p_w5a = S_2peaks_w5a/(1 - S_2peaks_w5a)
#print(fitter_w5a.fit_obj[0].model.fit_report(min_correl=0.5))
print('Fitted direct excitation (na/naa) [2-Asym-Gauss]:', dir_ex_S2p_w5a)
fig, ax = plt.subplots(1, 3, figsize=(19, 4.5))
mfit.plot_mfit(fitter_w5, ax=ax[0])
mfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False)
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w5*100))
mfit.plot_mfit(fitter_w5, ax=ax[1], plot_model=False, plot_kde=True)
mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False)
ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w5*100));
mfit.plot_mfit(fitter_w5a, ax=ax[2])
mfit.plot_mfit(fitter_g, ax=ax[2], plot_model=False, plot_kde=False)
ax[2].set_title('2-Asym-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w5a*100));
sample = data_id
n_bursts_aa = ds_sas.num_bursts[0]
variables = ('sample n_bursts_aa dir_ex_S1p dir_ex_S_kde dir_ex_S2p dir_ex_S2pa '
'dir_ex_S2p_w1 dir_ex_S_kde_w1 dir_ex_S_kde_w4 dir_ex_S_kde_w5 dir_ex_S2p_w5 dir_ex_S2p_w5a '
'S_2peaks_w5 S_2peaks_w5_fiterr\n')
variables_csv = variables.replace(' ', ',')
fmt_float = '{%s:.6f}'
fmt_int = '{%s:d}'
fmt_str = '{%s}'
fmt_dict = {**{'sample': fmt_str},
**{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}
var_dict = {name: eval(name) for name in variables.split()}
var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n'
data_str = var_fmt.format(**var_dict)
print(variables_csv)
print(data_str)
# NOTE: The file name should be the notebook name but with .csv extension
with open('results/usALEX-5samples-PR-raw-dir_ex_aa-fit-%s.csv' % ph_sel_name, 'a') as f:
f.seek(0, 2)
if f.tell() == 0:
f.write(variables_csv)
f.write(data_str)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load software and filenames definitions
Step2: Data folder
Step3: Check that the folder exists
Step4: List of data files in data_dir
Step5: Data load
Step6: Laser alternation selection
Step7: We need to define some parameters
Step8: We should check if everything is OK with an alternation histogram
Step9: If the plot looks good we can apply the parameters with
Step10: Measurement info
Step11: Or check the measurements duration
Step12: Compute background
Step13: Burst search and selection
Step14: Preliminary selection and plots
Step15: A-direct excitation fitting
Step16: Zero threshold on nd
Step17: Selection 1
Step18: Selection 2
Step19: Selection 3
Step20: Save data to file
Step21: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.
Step22: This is just a trick to format the different variables
|
6,805
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale
from sklearn.preprocessing import normalize
from sklearn import cross_validation as cv
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import r2_score, mean_squared_error as mse
from sklearn.svm import SVR
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import RANSACRegressor
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsClassifier
# Load the room occupancy dataset
occupancy = os.path.join('data','occupancy_data','datatraining.txt')
occupancy = pd.read_csv(occupancy, sep=',')
occupancy.columns = [
'date', 'temp', 'humid', 'light', 'co2', 'hratio', 'occupied'
]
print(len(occupancy))
def classify(attributes, targets, model):
Executes classification using the specified model and returns
a classification report.
# Split data into 'test' and 'train' for cross validation
splits = cv.train_test_split(attributes, targets, test_size=0.2)
X_train, X_test, y_train, y_test = splits
model.fit(X_train, y_train)
y_true = y_test
y_pred = model.predict(X_test)
print(classification_report(y_true, y_pred, target_names=list(occupancy)))
features = occupancy[['temp', 'humid', 'light', 'co2', 'hratio']]
labels = occupancy['occupied']
# Scale the features
stdfeatures = scale(features)
classify(stdfeatures, labels, LinearSVC())
classify(stdfeatures, labels, KNeighborsClassifier())
# Load the concrete compression data set
concrete = pd.read_excel(os.path.join('data','Concrete_Data.xls'))
concrete.columns = [
'cement', 'slag', 'ash', 'water', 'splast',
'coarse', 'fine', 'age', 'strength'
]
print(len(concrete))
def regress(attributes, targets, model):
# Split data into 'test' and 'train' for cross validation
splits = cv.train_test_split(attributes, targets, test_size=0.2)
X_train, X_test, y_train, y_test = splits
model.fit(X_train, y_train)
y_true = y_test
y_pred = model.predict(X_test)
print("Mean squared error = {:0.3f}".format(mse(y_true, y_pred)))
print("R2 score = {:0.3f}".format(r2_score(y_true, y_pred)))
features = concrete[[
'cement', 'slag', 'ash', 'water', 'splast', 'coarse', 'fine', 'age'
]]
labels = concrete['strength']
regress(features, labels, Ridge())
regress(features, labels, Lasso())
regress(features, labels, ElasticNet())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's try it together.
Step2: More than 50 samples?
Step4: Predicting a quantity or a category?
Step5: Since our categorical dataset has fewer than 100,000 instances, we are prompted to start with sklearn.svm.LinearSVC (which will map the data to a higher dimensional feature space), or failing that, sklearn.neighbors.KNeighborsClassifier (which will assign instances to the class most common among its k nearest neighbors).
Step6: More than 50 samples?
Step7: Predicting a quantity or a category?
Step8: Meanwhile, for our concrete dataset, we must determine whether we think all of the features are important, or only a few of them. If we decide to keep all the features as is, the chart suggests using sklearn.linear_model.Ridge (which will identify features that are less predictive and ensure they have less influence in the model) or possibly sklearn.svm.SVR with a linear kernel (which is similar to the LinearSVC classifier). If we guess that some of the features are not important, we might decide instead to choose sklearn.linear_model.Lasso (which will drop out any features that aren't predictive) or sklearn.linear_model.ElasticNet (which will try to find a happy medium between the Lasso and Ridge methods, taking a linear combination of their L1 and L2 penalties).
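Step8 also mentions sklearn.svm.SVR with a linear kernel, which the code cell above imports but never runs; a minimal sketch, assuming the regress helper and the concrete features/labels defined there:
# Hypothetical extra run, not part of the original notebook: SVR with a linear kernel
regress(features, labels, SVR(kernel='linear'))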
|
6,806
|
<ASSISTANT_TASK:>
Python Code:
!pip install dm-sonnet tqdm
# Get enformer source code
!wget -q https://raw.githubusercontent.com/deepmind/deepmind-research/master/enformer/attention_module.py
!wget -q https://raw.githubusercontent.com/deepmind/deepmind-research/master/enformer/enformer.py
import tensorflow as tf
# Make sure the GPU is enabled
assert tf.config.list_physical_devices('GPU'), 'Start the colab kernel with GPU: Runtime -> Change runtime type -> GPU'
# Easier debugging of OOM
%env TF_ENABLE_GPU_GARBAGE_COLLECTION=false
import sonnet as snt
from tqdm import tqdm
from IPython.display import clear_output
import numpy as np
import pandas as pd
import time
import os
assert snt.__version__.startswith('2.0')
tf.__version__
# GPU colab has T4 with 16 GiB of memory
!nvidia-smi
import enformer
# @title `get_targets(organism)`
def get_targets(organism):
targets_txt = f'https://raw.githubusercontent.com/calico/basenji/master/manuscripts/cross2020/targets_{organism}.txt'
return pd.read_csv(targets_txt, sep='\t')
# @title `get_dataset(organism, subset, num_threads=8)`
import glob
import json
import functools
def organism_path(organism):
return os.path.join('gs://basenji_barnyard/data', organism)
def get_dataset(organism, subset, num_threads=8):
metadata = get_metadata(organism)
dataset = tf.data.TFRecordDataset(tfrecord_files(organism, subset),
compression_type='ZLIB',
num_parallel_reads=num_threads)
dataset = dataset.map(functools.partial(deserialize, metadata=metadata),
num_parallel_calls=num_threads)
return dataset
def get_metadata(organism):
# Keys:
# num_targets, train_seqs, valid_seqs, test_seqs, seq_length,
# pool_width, crop_bp, target_length
path = os.path.join(organism_path(organism), 'statistics.json')
with tf.io.gfile.GFile(path, 'r') as f:
return json.load(f)
def tfrecord_files(organism, subset):
# Sort the values by int(*).
return sorted(tf.io.gfile.glob(os.path.join(
organism_path(organism), 'tfrecords', f'{subset}-*.tfr'
)), key=lambda x: int(x.split('-')[-1].split('.')[0]))
def deserialize(serialized_example, metadata):
Deserialize bytes stored in TFRecordFile.
feature_map = {
'sequence': tf.io.FixedLenFeature([], tf.string),
'target': tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_example(serialized_example, feature_map)
sequence = tf.io.decode_raw(example['sequence'], tf.bool)
sequence = tf.reshape(sequence, (metadata['seq_length'], 4))
sequence = tf.cast(sequence, tf.float32)
target = tf.io.decode_raw(example['target'], tf.float16)
target = tf.reshape(target,
(metadata['target_length'], metadata['num_targets']))
target = tf.cast(target, tf.float32)
return {'sequence': sequence,
'target': target}
df_targets_human = get_targets('human')
df_targets_human.head()
human_dataset = get_dataset('human', 'train').batch(1).repeat()
mouse_dataset = get_dataset('mouse', 'train').batch(1).repeat()
human_mouse_dataset = tf.data.Dataset.zip((human_dataset, mouse_dataset)).prefetch(2)
it = iter(mouse_dataset)
example = next(it)
# Example input
it = iter(human_mouse_dataset)
example = next(it)
for i in range(len(example)):
print(['human', 'mouse'][i])
print({k: (v.shape, v.dtype) for k,v in example[i].items()})
def create_step_function(model, optimizer):
@tf.function
def train_step(batch, head, optimizer_clip_norm_global=0.2):
with tf.GradientTape() as tape:
outputs = model(batch['sequence'], is_training=True)[head]
loss = tf.reduce_mean(
tf.keras.losses.poisson(batch['target'], outputs))
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply(gradients, model.trainable_variables)
return loss
return train_step
learning_rate = tf.Variable(0., trainable=False, name='learning_rate')
optimizer = snt.optimizers.Adam(learning_rate=learning_rate)
num_warmup_steps = 5000
target_learning_rate = 0.0005
model = enformer.Enformer(channels=1536 // 4, # Use 4x fewer channels to train faster.
num_heads=8,
num_transformer_layers=11,
pooling_type='max')
train_step = create_step_function(model, optimizer)
# Train the model
steps_per_epoch = 20
num_epochs = 5
data_it = iter(human_mouse_dataset)
global_step = 0
for epoch_i in range(num_epochs):
for i in tqdm(range(steps_per_epoch)):
global_step += 1
if global_step > 1:
learning_rate_frac = tf.math.minimum(
1.0, global_step / tf.math.maximum(1.0, num_warmup_steps))
learning_rate.assign(target_learning_rate * learning_rate_frac)
batch_human, batch_mouse = next(data_it)
loss_human = train_step(batch=batch_human, head='human')
loss_mouse = train_step(batch=batch_mouse, head='mouse')
# End of epoch.
print('')
print('loss_human', loss_human.numpy(),
'loss_mouse', loss_mouse.numpy(),
'learning_rate', optimizer.learning_rate.numpy()
)
# @title `PearsonR` and `R2` metrics
def _reduced_shape(shape, axis):
if axis is None:
return tf.TensorShape([])
return tf.TensorShape([d for i, d in enumerate(shape) if i not in axis])
class CorrelationStats(tf.keras.metrics.Metric):
Contains shared code for PearsonR and R2.
def __init__(self, reduce_axis=None, name='pearsonr'):
Pearson correlation coefficient.
Args:
reduce_axis: Specifies over which axis to compute the correlation (say
(0, 1). If not specified, it will compute the correlation across the
whole tensor.
name: Metric name.
super(CorrelationStats, self).__init__(name=name)
self._reduce_axis = reduce_axis
self._shape = None # Specified in _initialize.
def _initialize(self, input_shape):
# Remaining dimensions after reducing over self._reduce_axis.
self._shape = _reduced_shape(input_shape, self._reduce_axis)
weight_kwargs = dict(shape=self._shape, initializer='zeros')
self._count = self.add_weight(name='count', **weight_kwargs)
self._product_sum = self.add_weight(name='product_sum', **weight_kwargs)
self._true_sum = self.add_weight(name='true_sum', **weight_kwargs)
self._true_squared_sum = self.add_weight(name='true_squared_sum',
**weight_kwargs)
self._pred_sum = self.add_weight(name='pred_sum', **weight_kwargs)
self._pred_squared_sum = self.add_weight(name='pred_squared_sum',
**weight_kwargs)
def update_state(self, y_true, y_pred, sample_weight=None):
Update the metric state.
Args:
y_true: Multi-dimensional float tensor [batch, ...] containing the ground
truth values.
y_pred: float tensor with the same shape as y_true containing predicted
values.
sample_weight: 1D tensor aligned with y_true batch dimension specifying
the weight of individual observations.
if self._shape is None:
# Explicit initialization check.
self._initialize(y_true.shape)
y_true.shape.assert_is_compatible_with(y_pred.shape)
y_true = tf.cast(y_true, 'float32')
y_pred = tf.cast(y_pred, 'float32')
self._product_sum.assign_add(
tf.reduce_sum(y_true * y_pred, axis=self._reduce_axis))
self._true_sum.assign_add(
tf.reduce_sum(y_true, axis=self._reduce_axis))
self._true_squared_sum.assign_add(
tf.reduce_sum(tf.math.square(y_true), axis=self._reduce_axis))
self._pred_sum.assign_add(
tf.reduce_sum(y_pred, axis=self._reduce_axis))
self._pred_squared_sum.assign_add(
tf.reduce_sum(tf.math.square(y_pred), axis=self._reduce_axis))
self._count.assign_add(
tf.reduce_sum(tf.ones_like(y_true), axis=self._reduce_axis))
def result(self):
raise NotImplementedError('Must be implemented in subclasses.')
def reset_states(self):
if self._shape is not None:
tf.keras.backend.batch_set_value([(v, np.zeros(self._shape))
for v in self.variables])
class PearsonR(CorrelationStats):
Pearson correlation coefficient.
Computed as:
((x - x_avg) * (y - y_avg) / sqrt(Var[x] * Var[y])
def __init__(self, reduce_axis=(0,), name='pearsonr'):
Pearson correlation coefficient.
Args:
reduce_axis: Specifies over which axis to compute the correlation.
name: Metric name.
super(PearsonR, self).__init__(reduce_axis=reduce_axis,
name=name)
def result(self):
true_mean = self._true_sum / self._count
pred_mean = self._pred_sum / self._count
covariance = (self._product_sum
- true_mean * self._pred_sum
- pred_mean * self._true_sum
+ self._count * true_mean * pred_mean)
true_var = self._true_squared_sum - self._count * tf.math.square(true_mean)
pred_var = self._pred_squared_sum - self._count * tf.math.square(pred_mean)
tp_var = tf.math.sqrt(true_var) * tf.math.sqrt(pred_var)
correlation = covariance / tp_var
return correlation
class R2(CorrelationStats):
R-squared (fraction of explained variance).
def __init__(self, reduce_axis=None, name='R2'):
R-squared metric.
Args:
reduce_axis: Specifies over which axis to compute the correlation.
name: Metric name.
super(R2, self).__init__(reduce_axis=reduce_axis,
name=name)
def result(self):
true_mean = self._true_sum / self._count
total = self._true_squared_sum - self._count * tf.math.square(true_mean)
residuals = (self._pred_squared_sum - 2 * self._product_sum
+ self._true_squared_sum)
return tf.ones_like(residuals) - residuals / total
class MetricDict:
def __init__(self, metrics):
self._metrics = metrics
def update_state(self, y_true, y_pred):
for k, metric in self._metrics.items():
metric.update_state(y_true, y_pred)
def result(self):
return {k: metric.result() for k, metric in self._metrics.items()}
def evaluate_model(model, dataset, head, max_steps=None):
metric = MetricDict({'PearsonR': PearsonR(reduce_axis=(0,1))})
@tf.function
def predict(x):
return model(x, is_training=False)[head]
for i, batch in tqdm(enumerate(dataset)):
if max_steps is not None and i > max_steps:
break
metric.update_state(batch['target'], predict(batch['sequence']))
return metric.result()
metrics_human = evaluate_model(model,
dataset=get_dataset('human', 'valid').batch(1).prefetch(2),
head='human',
max_steps=100)
print('')
print({k: v.numpy().mean() for k, v in metrics_human.items()})
metrics_mouse = evaluate_model(model,
dataset=get_dataset('mouse', 'valid').batch(1).prefetch(2),
head='mouse',
max_steps=100)
print('')
print({k: v.numpy().mean() for k, v in metrics_mouse.items()})
np.random.seed(42)
EXTENDED_SEQ_LENGTH = 393_216
SEQ_LENGTH = 196_608
inputs = np.array(np.random.random((1, EXTENDED_SEQ_LENGTH, 4)), dtype=np.float32)
inputs_cropped = enformer.TargetLengthCrop1D(SEQ_LENGTH)(inputs)
checkpoint_gs_path = 'gs://dm-enformer/models/enformer/sonnet_weights/*'
checkpoint_path = '/tmp/enformer_checkpoint'
!mkdir /tmp/enformer_checkpoint
# Copy checkpoints from GCS to temporary directory.
# This will take a while as the checkpoint is ~ 1GB.
for file_path in tf.io.gfile.glob(checkpoint_gs_path):
print(file_path)
file_name = os.path.basename(file_path)
tf.io.gfile.copy(file_path, f'{checkpoint_path}/{file_name}', overwrite=True)
!ls -lh /tmp/enformer_checkpoint
enformer_model = enformer.Enformer()
checkpoint = tf.train.Checkpoint(module=enformer_model)
latest = tf.train.latest_checkpoint(checkpoint_path)
print(latest)
status = checkpoint.restore(latest)
# Using `is_training=False` to match TF-hub predict_on_batch function.
restored_predictions = enformer_model(inputs_cropped, is_training=False)
import tensorflow_hub as hub
enformer_tf_hub_model = hub.load("https://tfhub.dev/deepmind/enformer/1").model
hub_predictions = enformer_tf_hub_model.predict_on_batch(inputs)
np.allclose(hub_predictions['human'], restored_predictions['human'], atol=1e-5)
# Can run with 'is_training=True' but note that this will
# change the predictions as the batch statistics will be updated
# and the outputs will likley not match the TF-hub model.
# enformer(inputs_cropped, is_training=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import
Step3: Code
Step4: Load dataset
Step5: Model training
Step13: Evaluate
Step14: Restore Checkpoint
|
6,807
|
<ASSISTANT_TASK:>
Python Code:
Celsius = [36.2, 36.7, 47.3, 17.8]
Fahrenheit = [1.8 * C + 32 for C in Celsius]
Fahrenheit
colors = ["red", "purple", "yellow", "blue", "green"]
things = [ "triangle", "rectangle", "pentagon" ]
all_combination = [(x, y) for x in things for y in colors]
all_combination
import urllib
url = "http://weather.noaa.gov/pub/data" +\
"/observations/metar/decoded/"
def NOAA_string(s):
noaa_data_string = urllib.urlopen(url + s + '.TXT').read()
return noaa_data_string
def NOAA_temperature(s):
if s.find("Temperature") == -1:
return "Info NA"
else:
L = s.split('\n')
for index, line in enumerate(L):
if line.find("Temperature") == -1:
pass
else:
break
temp_line = L[index].split()
return int(float(temp_line[-2][1:]))
def city_temperature(s):
return NOAA_temperature(NOAA_string(s))
print(city_temperature('A302'))
print(city_temperature('RKSG'))
import requests
NOAA = 'http://weather.noaa.gov/pub/data/observations/metar/decoded/'
city_name_info = str(requests.get(NOAA).text)
type(city_name_info)
city_name_info_line = city_name_info.split('\n')
type(city_name_info_line)
len(city_name_info_line)
city_name_info_line[:10]
city_name_info_line[8:18]
city_name_info_line[-10:]
city_name_info_line[-10:-4]
A302_line = city_name_info_line[8]
num = A302_line.find('.TXT')
print(A302_line[num-4 : num])
city_codes = []
for line in city_name_info_line[8:-4]:
num = line.find('.TXT')
city_codes.append(line[num-4 : num])
city_codes = [line[line.find('.TXT')-4 : line.find('.TXT')] for line in city_name_info_line[8:-4]]
city_codes[-10:]
len(city_codes)
city_codes[:10]
def city_temp_list(num):
L = []
for city in city_codes[:num]:
temp = city_temperature(city)
L.append([city, temp])
return L
List_sample30 = city_temp_list(30)
city_codes[:30]
def list_search(x, xs):
for y in xs:
if x == y[0]:
return y[1]
else:
pass
%time list_search('BDAB', List_sample30)
%time list_search('ABLC', List_sample30)
%time list_search('ABBN', List_sample30)
%time for code in city_codes[:30]: list_search(code, List_sample30)
def cities(xs):
L = []
for x in xs:
L.append(x[x.find('.TXT') - 4: x.find('.TXT') + 4])
return L
def city_temp_hash(num):
H = {}
for city in city_codes[:num]:
temp = city_temperature(city)
H[city] = temp
return H
Hash_sample30 = city_temp_hash(30)
Hash_sample30
%time Hash_sample30['BDAB']
%time Hash_sample30['AVLC']
%time Hash_sample30['ABBN']
%time for code in city_codes[:30]: Hash_sample30[code]
def city_temp_list_F(num):
F = []
for center in city_temp_list(num):
if isinstance(center[1], int):
F.append([center[0], float(1.8 * center[1] + 32)])
else:
F.append([center[0], 'Info NA'])
return F
city_temp_list_F(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using the list above, a list of Fahrenheit temperatures can be built as follows.
Step2: Example
Step3: To build a list containing every possible (shape, color) tuple combination, we can do the following.
Step4: Using a hash table
Step5: Exercise
Step6: Running the command below
Step7: Let's check just the first few lines.
Step8: We can see that the file information for the cities starts at line 9.
Step9: We can confirm that it matches the code-name files of the top 10 cities shown on the site below.
Step10: The last 4 lines are again HTML-related information and have nothing to do with the weather stations.
Step11: Now it is easy to extract each city's code name from each entry of city_name_info_line.
Step12: Applying a for loop to the code above, we can build a list of the code names of all cities.
Step13: The code above can also be written as a list comprehension.
Step14: We can confirm that the code names of 8947 weather stations are currently stored.
Step15: The next step is to build a sequence type whose items are pairs of a code name and the corresponding city's temperature.
Step16: Now let's store the information for the first 30 weather stations.
Step17: Then let's look up the temperature measured at a particular weather station.
Step18: Code that finds the temperature for a given code name in the list of pairs looks, for example, like the following.
Step19: To measure the running time of the list_search function, run it as shown below.
Step20: Checking all 30 entries takes a total of about 60 microseconds.
Step21: Problem
Step22: With a hash table, checking all 30 sample entries takes about 12 microseconds.
|
6,808
|
<ASSISTANT_TASK:>
Python Code:
from PersistenceExplorer import *
[ filename for filename in os.listdir('../data/bmp') if filename.endswith('.bmp') ]
ProcessImageFolderWithPHAT('../data/bmp/')
[ filename for filename in os.listdir('../data/bmp/pd_sub') if filename.endswith('.csv') ]
with open('../data/bmp/pd_sub/00001.csv', 'r') as f:
csv_data = f.read()
print(csv_data)
imagefiles = [ '/files/data/bmp/%05d.bmp' % i for i in range(1,21)]
pdfiles = [ '/files/data/bmp/pd_sub/%05d.csv' % i for i in range(1,21)]
frames = range(0, len(imagefiles))
imagesize = [421, 421]
max_image_display_size = 400
persistence_diagram_display_size = 400
dimension_of_interest = 0
PersistenceExplorer(imagefiles, pdfiles, frames, dimension_of_interest,
imagesize, max_image_display_size, persistence_diagram_display_size)
dimension_of_interest = 1
PersistenceExplorer(imagefiles, pdfiles, frames, dimension_of_interest,
imagesize, max_image_display_size, persistence_diagram_display_size)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Computing persistence of images
Step2: Now that we know we have a set of images, we want to compute the corresponding persistence diagrams. The following command will compute both the sublevel and superlevel persistence of each image and save the results as .csv files. Each image processed will result in a .csv file with the same basename as the original input image, e.g. 00001.bmp results in files pd_sub/00001.csv and pd_sup/00001.csv.
Step3: We verify the sublevel results are indeed stored in a newly created subfolder pd_sub
Step4: Similarly the superlevel persistence results are stored in the subdirectory pd_sup. We can visually inspect the results of such a file
Step5: Finer control of persistence calculations
Step8: After these general environment parameters have been set up, we can now call the persistence explorer to interact with our data. We still need to choose a dimension of interest (i.e. $H_0$ features or $H_1$ persistence features), and then provide this choice and the above parameters to the tool.
Step9: Visualizing persistence generators
|
6,809
|
<ASSISTANT_TASK:>
Python Code:
from google.cloud import aiplatform
REGION = 'us-central1'
PROJECT_ID = !(gcloud config get-value project)
PROJECT_ID = PROJECT_ID[0]
# Set `PATH` to include the directory containing KFP CLI
PATH=%env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
!cat trainer_image_vertex/Dockerfile
IMAGE_NAME='trainer_image_covertype_vertex'
TAG='latest'
TRAINING_CONTAINER_IMAGE_URI=f'gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}'
TRAINING_CONTAINER_IMAGE_URI
!gcloud builds submit --timeout 15m --tag $TRAINING_CONTAINER_IMAGE_URI trainer_image_vertex
SERVING_CONTAINER_IMAGE_URI = 'us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest'
%%writefile ./pipeline_vertex/pipeline.py
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
Kubeflow Covertype Pipeline.
import os
from kfp import dsl
from kfp.components import create_component_from_func_v2
from tuning_lightweight_component import tune_hyperparameters
from training_lightweight_component import train_and_deploy
PIPELINE_ROOT = os.getenv('PIPELINE_ROOT')
PROJECT_ID = os.getenv('PROJECT_ID')
REGION = os.getenv('REGION')
TRAINING_CONTAINER_IMAGE_URI = os.getenv('TRAINING_CONTAINER_IMAGE_URI')
SERVING_CONTAINER_IMAGE_URI = os.getenv('SERVING_CONTAINER_IMAGE_URI')
TRAINING_FILE_PATH = os.getenv('TRAINING_FILE_PATH')
VALIDATION_FILE_PATH = os.getenv('VALIDATION_FILE_PATH')
MAX_TRIAL_COUNT = os.getenv('MAX_TRIAL_COUNT', 5)
PARALLEL_TRIAL_COUNT = os.getenv('PARALLEL_TRIAL_COUNT', 5)
THRESHOLD = os.getenv('THRESHOLD', 0.6)
tune_hyperparameters_component = # TODO
train_and_deploy_component = # TODO
@dsl.pipeline(
name="covertype-kfp-pipeline",
description="The pipeline training and deploying the Covertype classifier",
pipeline_root=PIPELINE_ROOT,
)
def covertype_train(
training_container_uri: str = TRAINING_CONTAINER_IMAGE_URI,
serving_container_uri: str = SERVING_CONTAINER_IMAGE_URI,
training_file_path: str = TRAINING_FILE_PATH,
validation_file_path: str = VALIDATION_FILE_PATH,
accuracy_deployment_threshold: float = THRESHOLD,
max_trial_count: int = MAX_TRIAL_COUNT,
parallel_trial_count: int = PARALLEL_TRIAL_COUNT,
pipeline_root: str = PIPELINE_ROOT,
):
staging_bucket = f'{pipeline_root}/staging'
tuning_op = # TODO
accuracy = tuning_op.outputs['best_accuracy']
with dsl.Condition(accuracy >= accuracy_deployment_threshold, name="deploy_decision"):
train_and_deploy_op = # TODO
ARTIFACT_STORE = f'gs://{PROJECT_ID}-vertex'
PIPELINE_ROOT = f'{ARTIFACT_STORE}/pipeline'
DATA_ROOT = f'{ARTIFACT_STORE}/data'
TRAINING_FILE_PATH = f'{DATA_ROOT}/training/dataset.csv'
VALIDATION_FILE_PATH = f'{DATA_ROOT}/validation/dataset.csv'
%env PIPELINE_ROOT={PIPELINE_ROOT}
%env PROJECT_ID={PROJECT_ID}
%env REGION={REGION}
%env SERVING_CONTAINER_IMAGE_URI={SERVING_CONTAINER_IMAGE_URI}
%env TRAINING_CONTAINER_IMAGE_URI={TRAINING_CONTAINER_IMAGE_URI}
%env TRAINING_FILE_PATH={TRAINING_FILE_PATH}
%env VALIDATION_FILE_PATH={VALIDATION_FILE_PATH}
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
PIPELINE_JSON = 'covertype_kfp_pipeline.json'
# TODO
!head {PIPELINE_JSON}
# TODO
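# Hedged sketch (an assumption, not the notebook's official solution) of how the
# TODO above might submit the compiled pipeline to Vertex AI Pipelines, reusing the
# aiplatform import and the PROJECT_ID, REGION and PIPELINE_JSON values defined earlier:
aiplatform.init(project=PROJECT_ID, location=REGION)
aiplatform.PipelineJob(
    display_name='covertype_kfp_pipeline',
    template_path=PIPELINE_JSON,
    enable_caching=False,
).run()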
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Understanding the pipeline design
Step2: Let's now build and push this trainer container to the container registry
Step3: To match the ML framework version used at training time when serving the model, we will have to supply the following serving container to the pipeline
Step5: Note
Step6: Compile the pipeline
Step7: Let us make sure that the ARTIFACT_STORE has been created, and let us create it if not
Step8: Note
Step9: Exercise
Step10: Note
Step11: Deploy the pipeline package
|
6,810
|
<ASSISTANT_TASK:>
Python Code:
import torch as t
# Tensors
a = t.tensor([1,2,3])
# Can specify type during construction
a = t.tensor([1,2,3], dtype=t.half)
# Can cast to different types once constructed
a
a.double()
a.float()
a.short()
a.long()
import numpy as np
x = t.Tensor([1,2,3])
x
y = x.numpy()
type(y)
x
y
x += 10
x
y
y = np.array([5,4,3])
type(y)
x = t.from_numpy(y)
type(x)
y
x
y += 10
y
x
import time
def timer(f, trials=5):
avg_time = 0
for i in range(trials):
start = time.time()
f()
stop = time.time()
avg_time += (stop - start)/trials
return avg_time
np_arr = np.random.rand(10000,10000)
t_arr = t.rand((10000,10000))
print('type(t_arr) = ', type(t_arr))
print('t_arr.shape = ', t_arr.shape)
print('type(np_arr) = ', type(np_arr))
print('np_arr.shape = ', np_arr.shape)
timer(lambda: t_arr*t_arr)
timer(lambda: np_arr*np_arr)
def exp1():
y = t_arr * t_arr
y = t.empty(t_arr.shape)
def exp2():
t.mul(t_arr, t_arr, out=y)
def exp3():
t_arr.mul_(t_arr)
timer(exp1)
timer(exp2)
timer(exp3)
t_arr.shape
t_arr.device # Should currently be on the cpu
device = t.device('cuda')
t_arr.to(device)
t.cuda.is_available()
b = t.tensor([2])
b.requires_grad
y=b*b
y.backward()
b.requires_grad = True # or b.requires_grad_()
y=b*b
y.backward()
b.grad
c = b.detach()
y=c*c
y.backward()
b.grad
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: | Data type | dtype | CPU tensor | GPU tensor |
Step2: However, they point to the same place in memory... (a sketch of taking an independent copy instead follows this list)
Step3: Numpy -> Tensor
Step4: Speed and Efficiency
Step5: What about optimizing with some class methods?
Step6: Tensors have 100+ built-in class methods
Step7: a deep learning research platform that provides maximum flexibility and speed
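Since Step2 notes that a tensor and the array returned by .numpy() share the same memory, here is a minimal sketch (an illustration, not part of the original notebook) of taking an independent copy instead:
import torch as t
x = t.tensor([1., 2., 3.])
y_shared = x.numpy()        # shares storage with x
y_copy = x.clone().numpy()  # clone() allocates new storage, so nothing is shared
x += 10
print(y_shared)  # reflects the in-place change
print(y_copy)    # unchanged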
|
6,811
|
<ASSISTANT_TASK:>
Python Code:
# Importing a few necessary libraries
import numpy as np
import matplotlib.pyplot as pl
from sklearn import datasets
from sklearn.tree import DecisionTreeRegressor
# Make matplotlib show our plots inline (nicely formatted in the notebook)
%matplotlib inline
# Create our client's feature set for which we will be predicting a selling price
CLIENT_FEATURES = [[11.95, 0.00, 18.100, 0, 0.6590, 5.6090, 90.00, 1.385, 24, 680.0, 20.20, 332.09, 12.13]]
# Load the Boston Housing dataset into the city_data variable
city_data = datasets.load_boston()
# Initialize the housing prices and housing features
housing_prices = city_data.target
housing_features = city_data.data
print "Boston Housing dataset loaded successfully!"
# Number of houses in the dataset
total_houses = housing_features.shape[0]
# Number of features in the dataset
total_features = housing_features.shape[1]
# Minimum housing value in the dataset
minimum_price = housing_prices.min()
# Maximum housing value in the dataset
maximum_price = housing_prices.max()
# Mean house value of the dataset
mean_price = housing_prices.mean()
# Median house value of the dataset
median_price = np.median(housing_prices)
# Standard deviation of housing values of the dataset
std_dev = housing_prices.std()
# Show the calculated statistics
print "Boston Housing dataset statistics (in $1000's):\n"
print "Total number of houses:", total_houses
print "Total number of features:", total_features
print "Minimum house price:", minimum_price
print "Maximum house price:", maximum_price
print "Mean house price: {0:.3f}".format(mean_price)
print "Median house price:", median_price
print "Standard deviation of house price: {0:.3f}".format(std_dev)
print CLIENT_FEATURES
# Put any import statements you need for this code block here
from sklearn.cross_validation import train_test_split
def shuffle_split_data(X, y):
Shuffles and splits data into 70% training and 30% testing subsets,
then returns the training and testing subsets.
# Shuffle and split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Return the training and testing data subsets
return X_train, y_train, X_test, y_test
# Test shuffle_split_data
try:
X_train, y_train, X_test, y_test = shuffle_split_data(housing_features, housing_prices)
print "Successfully shuffled and split the data!"
except:
print "Something went wrong with shuffling and splitting the data."
# Put any import statements you need for this code block here
from sklearn.metrics import mean_squared_error
def performance_metric(y_true, y_predict):
Calculates and returns the total error between true and predicted values
based on a performance metric chosen by the student.
error = mean_squared_error(y_true, y_predict)
return error
# Test performance_metric
try:
total_error = performance_metric(y_train, y_train)
print "Successfully performed a metric calculation!"
except:
print "Something went wrong with performing a metric calculation."
# Put any import statements you need for this code block
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn.grid_search import GridSearchCV
def fit_model(X, y):
Tunes a decision tree regressor model using GridSearchCV on the input data X
and target labels y and returns this optimal model.
# Create a decision tree regressor object
regressor = DecisionTreeRegressor()
# Set up the parameters we wish to tune
parameters = {'max_depth':(1,2,3,4,5,6,7,8,9,10)}
# Make an appropriate scoring function
scoring_function = make_scorer(mean_squared_error, greater_is_better=False)
# Make the GridSearchCV object
reg = GridSearchCV(regressor, param_grid=parameters, scoring=scoring_function)
# Fit the learner to the data to obtain the optimal model with tuned parameters
reg.fit(X, y)
# Return the optimal model
return reg.best_estimator_
# Test fit_model on entire dataset
try:
reg = fit_model(housing_features, housing_prices)
print "Successfully fit a model!"
except:
print "Something went wrong with fitting a model."
def learning_curves(X_train, y_train, X_test, y_test):
Calculates the performance of several models with varying sizes of training data.
The learning and testing error rates for each model are then plotted.
print "Creating learning curve graphs for max_depths of 1, 3, 6, and 10. . ."
# Create the figure window
fig = pl.figure(figsize=(10,8))
# We will vary the training set size so that we have 50 different sizes
sizes = np.rint(np.linspace(1, len(X_train), 50)).astype(int)
train_err = np.zeros(len(sizes))
test_err = np.zeros(len(sizes))
# Create four different models based on max_depth
for k, depth in enumerate([1,3,6,10]):
for i, s in enumerate(sizes):
# Setup a decision tree regressor so that it learns a tree with max_depth = depth
regressor = DecisionTreeRegressor(max_depth = depth)
# Fit the learner to the training data
regressor.fit(X_train[:s], y_train[:s])
# Find the performance on the training set
train_err[i] = performance_metric(y_train[:s], regressor.predict(X_train[:s]))
# Find the performance on the testing set
test_err[i] = performance_metric(y_test, regressor.predict(X_test))
# Subplot the learning curve graph
ax = fig.add_subplot(2, 2, k+1)
ax.plot(sizes, test_err, lw = 2, label = 'Testing Error')
ax.plot(sizes, train_err, lw = 2, label = 'Training Error')
ax.legend()
ax.set_title('max_depth = %s'%(depth))
ax.set_xlabel('Number of Data Points in Training Set')
ax.set_ylabel('Total Error')
ax.set_xlim([0, len(X_train)])
# Visual aesthetics
fig.suptitle('Decision Tree Regressor Learning Performances', fontsize=18, y=1.03)
fig.tight_layout()
fig.show()
def model_complexity(X_train, y_train, X_test, y_test):
Calculates the performance of the model as model complexity increases.
The learning and testing errors rates are then plotted.
print "Creating a model complexity graph. . . "
# We will vary the max_depth of a decision tree model from 1 to 14
max_depth = np.arange(1, 14)
train_err = np.zeros(len(max_depth))
test_err = np.zeros(len(max_depth))
for i, d in enumerate(max_depth):
# Setup a Decision Tree Regressor so that it learns a tree with depth d
regressor = DecisionTreeRegressor(max_depth = d)
# Fit the learner to the training data
regressor.fit(X_train, y_train)
# Find the performance on the training set
train_err[i] = performance_metric(y_train, regressor.predict(X_train))
# Find the performance on the testing set
test_err[i] = performance_metric(y_test, regressor.predict(X_test))
# Plot the model complexity graph
pl.figure(figsize=(7, 5))
pl.title('Decision Tree Regressor Complexity Performance')
pl.plot(max_depth, test_err, lw=2, label = 'Testing Error')
pl.plot(max_depth, train_err, lw=2, label = 'Training Error')
pl.legend()
pl.xlabel('Maximum Depth')
pl.ylabel('Total Error')
pl.show()
learning_curves(X_train, y_train, X_test, y_test)
model_complexity(X_train, y_train, X_test, y_test)
print "Final model has an optimal max_depth parameter of", reg.get_params()['max_depth']
sale_price = reg.predict(CLIENT_FEATURES)
print "Predicted value of client's home: {0:.3f}".format(sale_price[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Statistical Analysis and Data Exploration
Step 1
Step2: Question 1
Step4: Answer
Step6: Question 3
Step8: Question 4
Step11: Question 5
Step12: Analyzing Model Performance
Step13: Question 7
Step14: Question 9
Step15: Answer
|
6,812
|
<ASSISTANT_TASK:>
Python Code:
def squared_numbers(n):
return [x*x for x in range(n)]
def sum_squares(n):
return sum(squared_numbers(n+1))
sum_squares(20000000)
def squared_numbers_alternate(n):
for x in range(n):
yield x*x
def sum_squares_alternate(n):
return sum(squared_numbers_alternate(n+1))
sum_squares_alternate(20000000)
import os
print(os.getcwd())
def grep(fileobject, pattern):
for index, line in enumerate(fileobject):
if pattern in line:
# start indexing from 1 for humans
# remove the white space at the end
yield index+1, line.strip()
def process_file(input_, pattern):
with open(input_, "r") as file_:
for idx, line in grep(file_, pattern):
print("line {} matches: {}".format(idx, line))
print("done searching")
process_file("../data/grep.txt", "test")
square = lambda x: x*x
print(square(4))
(lambda x: x-1).__call__(1)
my_list = [
("apple", 5),
("banana", 3),
("pear", 10)
]
my_list.sort(key= lambda x: x[1]) #sort by the number
my_list
def mean(...):
pass
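# A hedged sketch of the mean() thought exercise (one possible answer, not the only one):
# a generator-friendly helper plus a one-line lambda for comparison.
def mean_example(values):
    values = list(values)  # materialise first in case a generator is passed
    return sum(values) / len(values)

mean_lambda = lambda *args: sum(args) / len(args)

print(mean_example(x * x for x in range(5)))  # 6.0
print(mean_lambda(1, 2, 3))  # 2.0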
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The code works and is all great, but it has one flaw
Step2: At this point you may wonder, doesn't range() return a list? The short answer is no, but the details are complicated.
Step3: Lambdas
Step4: A typical use case for lambda might be in accessing members of an object in a generic function.
Step5: Lambda has many other uses but those are left as a thought exercise.
|
6,813
|
<ASSISTANT_TASK:>
Python Code:
from pomegranate import *
# The guests initial door selection is completely random
guest = DiscreteDistribution( { 'A': 1./3, 'B': 1./3, 'C': 1./3 } )
# The door the prize is behind is also completely random
prize = DiscreteDistribution( { 'A': 1./3, 'B': 1./3, 'C': 1./3 } )
# Monty is dependent on both the guest and the prize.
monty = ConditionalProbabilityTable(
[[ 'A', 'A', 'A', 0.0 ],
[ 'A', 'A', 'B', 0.5 ],
[ 'A', 'A', 'C', 0.5 ],
[ 'A', 'B', 'A', 0.0 ],
[ 'A', 'B', 'B', 0.0 ],
[ 'A', 'B', 'C', 1.0 ],
[ 'A', 'C', 'A', 0.0 ],
[ 'A', 'C', 'B', 1.0 ],
[ 'A', 'C', 'C', 0.0 ],
[ 'B', 'A', 'A', 0.0 ],
[ 'B', 'A', 'B', 0.0 ],
[ 'B', 'A', 'C', 1.0 ],
[ 'B', 'B', 'A', 0.5 ],
[ 'B', 'B', 'B', 0.0 ],
[ 'B', 'B', 'C', 0.5 ],
[ 'B', 'C', 'A', 1.0 ],
[ 'B', 'C', 'B', 0.0 ],
[ 'B', 'C', 'C', 0.0 ],
[ 'C', 'A', 'A', 0.0 ],
[ 'C', 'A', 'B', 1.0 ],
[ 'C', 'A', 'C', 0.0 ],
[ 'C', 'B', 'A', 1.0 ],
[ 'C', 'B', 'B', 0.0 ],
[ 'C', 'B', 'C', 0.0 ],
[ 'C', 'C', 'A', 0.5 ],
[ 'C', 'C', 'B', 0.5 ],
[ 'C', 'C', 'C', 0.0 ]], [guest, prize] )
# State objects hold both the distribution, and a high level name.
s1 = State( guest, name="guest" )
s2 = State( prize, name="prize" )
s3 = State( monty, name="monty" )
# Create the Bayesian network object with a useful name
model = BayesianNetwork( "Monty Hall Problem" )
# Add the three states to the network
model.add_states(s1, s2, s3)
# Add transitions which represent conditional dependencies, where the second node is conditionally dependent on the first node (Monty is dependent on both guest and prize)
model.add_transition(s1, s3)
model.add_transition(s2, s3)
model.bake()
print model.probability(['A', 'B', 'C'])
print model.probability(['B', 'B', 'B'])
print
print model.log_probability(['C', 'A', 'B'])
print model.log_probability(['B', 'A', 'A'])
print model.predict_proba({})
marginals = model.predict_proba({})
print marginals[0].parameters[0]
model.predict_proba({'guest': 'A'})
model.predict_proba({'guest': 'A', 'monty': 'C'})
model.predict([['B', 'A', None],
['C', 'A', None],
['B', 'C', None],
['A', 'B', None]])
model.fit([['A', 'B', 'C'],
['A', 'C', 'B'],
['A', 'A', 'C'],
['B', 'B', 'C'],
['B', 'C', 'A']])
print model.predict_proba({})
%pylab inline
import time
times = []
for i in range(2, 18):
tic = time.time()
X = numpy.random.randint(2, size=(10000, i))
model = BayesianNetwork.from_samples(X, algorithm='exact')
times.append( time.time() - tic )
import seaborn
seaborn.set_style('whitegrid')
plt.figure(figsize=(14, 6))
plt.title('Time To Learn Bayesian Network', fontsize=18)
plt.xlabel("Number of Variables", fontsize=14)
plt.ylabel("Time (s)", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.plot(range(2, 18), times, linewidth=3, color='c')
plt.yscale('log')
times = []
for i in range(2, 253, 10):
tic = time.time()
X = numpy.random.randint(2, size=(10000, i))
model = BayesianNetwork.from_samples(X, algorithm='chow-liu')
times.append( time.time() - tic )
import seaborn
seaborn.set_style('whitegrid')
plt.figure(figsize=(14, 6))
plt.title('Time To Learn Bayesian Network', fontsize=18)
plt.xlabel("Number of Variables", fontsize=14)
plt.ylabel("Time (s)", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.plot( range(2, 253, 10), times, linewidth=3, color='c')
plt.yscale('log')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Probability
Step2: Inference
Step3: We are returned three DiscreteDistribution objects, each representing the marginal distribution for each variable, in the same order they were put into the model. In this case, they represent the guest, prize, and monty variables respectively. We see that everything is equally likely. If we want to access these distributions, we can do the following
Step4: The first element of marginals is a DiscreteDistribution, with all the same operations as a normal DiscreteDistribution objects. This means that parameters[0] will return the underlying dictionary used by the distribution, which we return here.
Step5: We can see that now Monty will not open door 'A', because the guest has chosen it. At the same time, the distribution over the prize has not changed, it is still equally likely that the prize is behind each door.
Step6: Suddenly, we see that the distribution over prizes has changed. It is now twice as likely that the car is behind the door labeled 'B'. This illustrates the somewhat famous Monty Hall problem.
Step7: Parameter Fitting
Step8: Structure Learning
Step9: The algorithm looks like it's running in exponential time, which is a major feat for the prospect of Bayesian network structure learning. Combined with an efficient implementation of the algorithm, this seems like calculating a graph with mid-20 variables would be possible on a normal computer.
|
6,814
|
<ASSISTANT_TASK:>
Python Code:
# Imports below were missing from this snippet (pandas, matplotlib and ipywidgets
# are assumed to be available in the original notebook environment)
import pandas as pd
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed

#training data
#fn = 'data/ocr/optdigits.tra'
#testing data
fn = 'data/ocr/optdigits.tes'
header="x11,x12,x13,x14,x15,x16,x17,x18,x21,x22,x23,x24,x25,x26,x27,x28,x31,x32,x33,x34,x35,x36,x37,x38,x41,x42,x43,x44,x45,x46,x47,x48,x51,x52,x53,x54,x55,x56,x57,x58,x61,x62,x63,x64,x65,x66,x67,x68,x71,x72,x73,x74,x75,x76,x77,x78,x81,x82,x83,x84,x85,x86,x87,x88,digit".split(",")
df = pd.read_csv(fn, header=None)
df.columns = header
df.head()
y = df.digit.copy().values
X = df.drop("digit", axis=1).values
X.shape, y.shape
X = X.reshape((-1, 8,8))
X.shape
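# The plotting cell below calls blowUp(), which is defined elsewhere in the original
# notebook. A hedged stand-in (assuming it simply upscales the 8x8 image by an integer
# factor using nearest-neighbour repetition) could look like this:
import numpy as np

def blowUp(img, factor):
    # repeat every pixel `factor` times along both axes
    return np.kron(img, np.ones((factor, factor), dtype=img.dtype))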
@interact(X=fixed(X), y=fixed(y), idx=(0,X.shape[0]), factor=(1,50))
def show_item(X, y, idx=0, factor=5):
x = X[idx]
print("Instance %s:\t[%s]" % (
idx+1, ", ".join("'%s'" % str(k) for k in
list(x.flatten()) + [y[idx]])))
x = (((x-16)/16.0)*255).astype("int")
x = blowUp(x, factor)
fig, ax = plt.subplots(figsize=(5,5))
ax.imshow(x, cmap="Greys")
ax.set_title("Instance=%s, Digit=%s" % (idx+1, y[idx]))
plt.axis('off')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: After running the cell below, you can move the slider to visualize the various instances of the dataset, change factor slider to increase the sharpness of the image
|
6,815
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
text
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
int_to_vocab = dict(enumerate(set(text)))
vocab_to_int = dict([(v,i) for (i,v) in int_to_vocab.items()])
return (vocab_to_int, int_to_vocab)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
return({".":"||Period||",
",":"||Comma||",
"\"":"||Quotation_Mark||",
";":"||Semicolon||",
"!":"||Exclamation_mark||",
"?":"||Question_mark||",
"(":"||Left_Parentheses||",
")":"||Right_Parentheses||",
"--":"||Dash||",
"\n":"||Return||"})
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
input = tf.placeholder(tf.int32,[None,None],name="input")
targets = tf.placeholder(tf.int32,[None,None],name="targets")
learning_rate = tf.placeholder(tf.float32,name="learning_rate")
return input, targets, learning_rate
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
multi = tf.contrib.rnn.MultiRNNCell([cell]*2)
initial_state = multi.zero_state(batch_size,tf.float32)
return multi, tf.identity(initial_state,"initial_state")
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
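# A hedged note (not required by the unit test above): building the stack as [cell]*2
# reuses one cell object for both layers, which shares weights and raises an error in
# TensorFlow >= 1.2. A safer construction creates one cell per layer, e.g.
# multi = tf.contrib.rnn.MultiRNNCell(
#     [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(2)])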
def get_embed(input_data, vocab_size, embed_dim):
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
embedding = tf.Variable(tf.random_uniform((vocab_size,embed_dim),-1,1))
return tf.nn.embedding_lookup(embedding,input_data)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
outputs,state = tf.nn.dynamic_rnn(cell,inputs,dtype=tf.float32)
return outputs, tf.identity(state,"final_state")
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
embeddings = get_embed(input_data, vocab_size, embed_dim)
lstm_layer, final_state = build_rnn(cell, embeddings)
full_layer = tf.contrib.layers.fully_connected(lstm_layer, vocab_size, activation_fn=None)
return full_layer, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
# TODO: Implement Function
nb_of_batches=int(len(int_text)/batch_size/seq_length)
def get_batch(i_batch):
input=[]
target=[]
for i_seq in range(0,batch_size):
start = i_batch*seq_length+i_seq*nb_of_batches*seq_length
end = start+seq_length
input += [int_text[start:end]]
target += [int_text[start+1:end]+[int_text[end%(nb_of_batches*batch_size*seq_length)]]]
return [input,target]
return np.array([get_batch(i_batch) for i_batch in range(0,nb_of_batches)])
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Number of Epochs
#results still get better after 100 epochs.
num_epochs = 200
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 256
# Embedding Dimension Size
# It is not obvious how to set this. I tried 50 (which seems to be the minimum generally recommended for word embeddings)
# but got worse results, probably because the dataset is too small.
embed_dim = 24
# Sequence Length
# long enough to embed a reasonably long sentence.
seq_length = 15
# Learning Rate
# I tried several values; performance starts to decrease above this.
learning_rate = 0.02
# Show stats for every n number of batches
show_every_n_batches = 10
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
return loaded_graph.get_tensor_by_name("input:0"),\
loaded_graph.get_tensor_by_name("initial_state:0"),\
loaded_graph.get_tensor_by_name("final_state:0"),\
loaded_graph.get_tensor_by_name("probs:0")
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
# TODO: Implement Function
return int_to_vocab[np.argmax(probabilities)]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
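# A hedged aside: always taking the argmax tends to produce repetitive scripts. A common
# alternative (an assumption here, not part of the project rubric) samples from the
# predicted distribution instead:
def pick_word_sampled(probabilities, int_to_vocab):
    p = probabilities / probabilities.sum()  # guard against float32 rounding
    idx = np.random.choice(len(p), p=p)
    return int_to_vocab[idx]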
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
6,816
|
<ASSISTANT_TASK:>
Python Code:
import os
os.chdir(os.getcwd() + '/..')
# Run some setup code for this notebook
import random
import numpy as np
import matplotlib.pyplot as plt
from utils.data_utils import get_CIFAR10_data
from utils.metrics_utils import rel_error
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
data = get_CIFAR10_data('datasets/cifar-10-batches-py', subtract_mean=True)
for k, v in data.items():
print('%s: ' % k, v.shape)
from layers.layers import batchnorm_forward
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
np.random.seed(231)
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print('Before batch normalization:')
print(' means: ', a.mean(axis=0))
print(' stds: ', a.std(axis=0))
print
# Means should be close to zero and stds close to one
print('After batch normalization (gamma=1, beta=0)')
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
print(' mean: ', a_norm.mean(axis=0))
print(' std: ', a_norm.std(axis=0))
print
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print('After batch normalization (nontrivial gamma, beta)')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
from layers.layers import batchnorm_backward
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
np.random.seed(231)
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
for t in range(50):
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print('After batch normalization (test-time):')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
from layers.layers import batchnorm_backward
from utils.gradient_check import eval_numerical_gradient_array
# Gradient check batchnorm backward pass
np.random.seed(231)
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
# x, gamma and beta are shared by reference, so a change made anywhere is visible everywhere;
# that is why the lambdas below can ignore their argument
fx = lambda _: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda _: batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda _: batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
a = 1
def fs(ss):
print(ss)
f = lambda a: fs(a)
def ff(f, c):
f(c)
b = 2
ff(f, b)
# TODO
# np.random.seed(231)
# N, D = 100, 500
# x = 5 * np.random.randn(N, D) + 12
# gamma = np.random.randn(D)
# beta = np.random.randn(D)
# dout = np.random.randn(N, D)
# bn_param = {'mode': 'train'}
# out, cache = batchnorm_forward(x, gamma, beta, bn_param)
# t1 = time.time()
# dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
# t2 = time.time()
# dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
# t3 = time.time()
# print('dx difference: ', rel_error(dx1, dx2))
# print('dgamma difference: ', rel_error(dgamma1, dgamma2))
# print('dbeta difference: ', rel_error(dbeta1, dbeta2))
# print('speedup: %.2fx' % ((t2 - t1) / (t3 - t2)))
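# A hedged sketch of the "alt" backward pass the commented-out benchmark above refers to.
# It assumes the forward cache stores (x_hat, gamma, inv_std), which may differ from the
# actual layers.layers implementation:
def batchnorm_backward_alt_sketch(dout, cache):
    x_hat, gamma, inv_std = cache
    N = dout.shape[0]
    dbeta = dout.sum(axis=0)
    dgamma = (dout * x_hat).sum(axis=0)
    dx = (gamma * inv_std / N) * (N * dout - dbeta - x_hat * dgamma)
    return dx, dgamma, dbeta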
from classifiers.fc_net import FullyConnectedNet
from utils.gradient_check import eval_numerical_gradient
np.random.seed(231)
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print('Running check with reg = ', reg)
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64,
use_batchnorm=True)
loss, grads = model.loss(X, y)
print('Initial loss: ', loss)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
if reg == 0: print()
from base.solver import Solver
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
bn_solver.train()
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
solver.train()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print('Running weight scale %d / %d' % (i + 1, len(weight_scales)))
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers[weight_scale] = solver
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
best_train_accs.append(max(solvers[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
best_val_accs.append(max(solvers[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gca().set_ylim(1.0, 3.5)
plt.gcf().set_size_inches(10, 15)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Batch normalization
Step2: Batch Normalization
Step3: Batch Normalization
Step4: Fully Connected Nets with Batch Normalization
Step5: Batchnorm for deep networks
Step6: Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
Step7: Batch normalization and initialization
|
6,817
|
<ASSISTANT_TASK:>
Python Code:
from pynq import Overlay
Overlay("base.bit").download()
from pynq.drivers import HDMI
from pynq.drivers.video import VMODE_1920x1080
hdmi_out = HDMI('out', video_mode=VMODE_1920x1080)
hdmi_in = HDMI('in', init_timeout=10, frame_list=hdmi_out.frame_list)
hdmi_in.start()
hdmi_out.start()
from IPython.display import Image
frame = hdmi_in.frame()
orig_img_path = '/home/xilinx/jupyter_notebooks/examples/data/face_detect.jpg'
frame.save_as_jpeg(orig_img_path)
Image(filename=orig_img_path)
import cv2
import numpy as np
frame = hdmi_in.frame_raw()
np_frame= (np.frombuffer(frame, dtype=np.uint8)).reshape(1080,1920,3)
face_cascade = cv2.CascadeClassifier(
'./data/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(
'./data/haarcascade_eye.xml')
gray = cv2.cvtColor(np_frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(np_frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = np_frame[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
hdmi_in.frame_index_next()
hdmi_out.frame_raw(bytearray(np_frame.tobytes()))
orig_img_path = '/home/xilinx/jupyter_notebooks/examples/data/face_detect.jpg'
hdmi_out.frame().save_as_jpeg(orig_img_path)
Image(filename=orig_img_path)
hdmi_out.stop()
hdmi_in.stop()
del hdmi_in, hdmi_out
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 2
Step2: Step 3
Step3: Step 4
Step4: Step 5
Step5: Step 6
Step6: Step 7
Step7: Step 7
|
6,818
|
<ASSISTANT_TASK:>
Python Code:
import mxnet as mx
from mxnet import nd, autograd
mx.random.seed(1)
x = nd.array([[1, 2], [3, 4]])
x
# Returns None type:
x.attach_grad()
with autograd.record():
y = x * 2
z = y * x
print(x)
print(y)
print(z)
# Returns None type
z.backward()
print(x.grad)
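# A quick sanity check: z = 2 * x * x, so dz/dx = 4 * x and the gradient printed above
# should be [[4., 8.], [12., 16.]] for x = [[1, 2], [3, 4]].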
with autograd.record():
y = x * 2
z = y * x
head_gradient = nd.array([[10, 1.], [.1, .01]])
z.backward(head_gradient)
print(x.grad)
a = nd.random_normal(shape=3)
a.attach_grad()
with autograd.record():
b = a * 2
while (nd.norm(b) < 1000).asscalar():
b = b * 2
if (mx.nd.sum(b) > 0).asscalar():
c = b
else:
c = 100 * b
head_gradient = nd.array([0.01, 0.1, 1.0])
c.backward(head_gradient)
print(a.grad)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Attaching gradients
Step2: Once we compute the gradient of f with respect to x, we'll need a place to store it.
Step3: Now we’re going to define the function f and MXNet will generate a computation graph on the fly.
Step4: Backpropagation time.
Step5: Now let's determine the expected output.
Step6: Head gradients and the chain rule
Step7: Now that we know the basics, we can do some wild things with autograd, including building differentiable functions using Pythonic control flow.
|
6,819
|
<ASSISTANT_TASK:>
Python Code:
!pip install astronn
import torch
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from astroNN.datasets import load_galaxy10
from astroNN.datasets.galaxy10 import galaxy10cls_lookup
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
# Read in the data
images, labels = load_galaxy10()
labels = labels.astype(np.float32)
images = images.astype(np.float32)
images = torch.tensor(images)
labels = torch.tensor(labels)
# Cut down the resolution of the images!!! What is this line doing in words?
images = images[:,::6,::6,1]
#Plot an example image here
#Flatten images here
#Normalize the flux of the images here
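# A hedged sketch for the three placeholders above (one possible approach, not the
# official solution): show one galaxy, flatten each image to a vector, then scale
# the fluxes into [0, 1].
plt.imshow(images[0], cmap='gray')
plt.title(galaxy10cls_lookup(int(labels[0])))
plt.show()

images_flat = images.reshape(images.shape[0], -1)  # (N, 12*12) feature vectors
images_flat = images_flat / images_flat.max()      # global flux normalisation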
class Autoencoder(torch.nn.Module):
# this defines the model
def __init__(self, input_size, hidden_size, hidden_inner, encoded_size):
super(Autoencoder, self).__init__()
print(input_size,hidden_size,encoded_size)
self.input_size = input_size
self.hidden_size = hidden_size
self.encoded_size = encoded_size
self.hidden_inner = hidden_inner
self.hiddenlayer1 = torch.nn.Linear(self.input_size, self.hidden_size)
# ADD A LAYER HERE
self.encodedlayer = torch.nn.Linear(self.hidden_inner, self.encoded_size)
self.hiddenlayer3 = torch.nn.Linear(self.encoded_size, self.hidden_inner)
# ADD A LAYER HERE
self.outputlayer = torch.nn.Linear(self.hidden_size, self.input_size)
# some nonlinear options
self.sigmoid = torch.nn.Sigmoid()
self.softmax = torch.nn.Softmax()
self.relu = torch.nn.ReLU()
def forward(self, x):
layer1 = self.hiddenlayer1(x)
activation1 = self.ACTIVATION?(layer1)
layer2 = self.hiddenlayer2(activation1)
activation2 = self.ACTIVATION?(layer2)
layer3 = self.encodedlayer(activation2)
activation3 = self.ACTIVATION?(layer3)
layer4 = self.hiddenlayer3(activation3)
activation4 = self.ACTIVATION?(layer4)
layer5 = self.hiddenlayer4(activation4)
activation5 = self.ACTIVATION?(layer5)
layer6 = self.outputlayer(activation5)
output = self.ACTIVATION?(layer6)
# Why do I have two outputs?
return output, layer3
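# A hedged note on the ACTIVATION? placeholders above (a common choice, not the required
# answer): ReLU for the hidden layers, e.g. `activation1 = self.relu(layer1)`, with the
# final output left linear (or passed through sigmoid if the inputs are scaled to [0, 1]).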
# train the model
def train_model(training_data,test_data, model):
# define the optimization
criterion = torch.nn.MSELoss()
# Choose between these two optimizers
#optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#optimizer = torch.optim.Adam(model.parameters(), lr=0.1,weight_decay=1e-6)
for epoch in range(500):
# clear the gradient
optimizer.zero_grad()
# compute the model output
myoutput, encodings_train = model(training_data)
# calculate loss
loss = criterion(myoutput, training_data)
# credit assignment
loss.backward()
# update model weights
optimizer.step()
# Add a plot of the loss vs epoch for the test and training sets here
#Do your training here!!
hidden_size_1 = 100
hidden_size_2 = 50
encoded_size = 10
model = Autoencoder(np.shape(images_train[0])[0],hidden_size_1,hidden_size_2,encoded_size)
train_model(images_train, images_test, model)
#Make an image of the original image
#Make an image of its reconstruction
#Make an image of (original - reconstruction)
#Scatter plot between two dimensions of the latent space
#Try coloring the points
clf = RandomForestClassifier(...)
clf.fit(...)
new_labels = clf.predict(...)
cm = confusion_matrix(labels_test,new_labels,normalize='true')
disp = ConfusionMatrixDisplay(confusion_matrix=cm)
disp.plot()
plt.show()
clf = IsolationForest(...).fit(encodings)
scores = -clf.score_samples(encodings) #I am taking the negative because the lowest score is actually the weirdest, which I don't like...
#Plot an image of the weirdest galazy!
#This plots the cumulative distribution
def cdf(x, label='',plot=True, *args, **kwargs):
x, y = sorted(x), np.arange(len(x)) / len(x)
return plt.plot(x, y, *args, **kwargs, label=label) if plot else (x, y)
ulabels = np.unique(labels)
for ulabel in ulabels:
gind = np.where(labels==ulabel)
cdf(...)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1a
Step2: Problem 1b.
Step3: Problem 3. Training
Step4: Problem 4a. Understand our Results
Step5: Problem 4b.
Step6: Bonus Problem 5a Playing with the Latent Space
Step7: Bonus Problem 5b Playing with the Latent Space
|
6,820
|
<ASSISTANT_TASK:>
Python Code:
from collections import deque
q = deque(maxlen = 3)
q.append(1)
q.append(2)
q.append(3)
q
q.append(4)
q
q.append(5)
q
q = deque()
q.append(1)
q.append(2)
q.append(3)
q
q.appendleft(4)
q
q.pop()
q
q.popleft()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Although you could implement these operations manually on a list (appending, removing, and so on), the deque-based solution here is more elegant and runs faster.
|
6,821
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# TensorFlow and tf.keras
import tensorflow.compat.v1 as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images.shape
len(train_labels)
train_labels
test_images.shape
len(test_labels)
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
predictions[0]
np.argmax(predictions[0])
test_labels[0]
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Grab an image from the test dataset
img = test_images[1]
print(img.shape)
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(1, predictions_single[0], test_labels)
plt.xticks(range(10), class_names, rotation=45)
plt.show()
prediction_result = np.argmax(predictions_single[0])
print(prediction_result)
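# Mapping the predicted index back to a human-readable label (a small addition,
# not part of the original tutorial):
print(class_names[prediction_result])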
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train your first neural network
Step2: Import the Fashion MNIST dataset
Step3: Loading the dataset returns four NumPy arrays
Step4: Explore the data
Step5: Likewise, there are 60,000 labels in the training set
Step6: Each label is an integer between 0 and 9
Step7: There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels
Step8: And the test set contains 10,000 images labels
Step9: Preprocess the data
Step10: We scale these values to a range of 0 to 1 before feeding to the neural network model. For this, we divide the values by 255. It's important that the training set and the testing set are preprocessed in the same way
Step11: Display the first 25 images from the training set and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network.
Step12: Build the model
Step13: The first layer in this network, tf.keras.layers.Flatten, transforms the format of the images from a 2d-array (of 28 by 28 pixels), to a 1d-array of 28 * 28 = 784 pixels. Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.
Step14: Train the model
Step15: As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.88 (or 88%) on the training data.
Step16: It turns out, the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of overfitting. Overfitting is when a machine learning model performs worse on new data than on its training data.
Step17: Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction
Step18: A prediction is an array of 10 numbers. These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value
Step19: So the model is most confident that this image is an ankle boot, or class_names[9]. And we can check the test label to see this is correct
Step20: We can graph this to look at the full set of 10 class predictions
Step21: Let's look at the 0th image, predictions, and prediction array.
Step22: Let's plot several images with their predictions. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent (out of 100) for the predicted label. Note that it can be wrong even when very confident.
Step23: Finally, use the trained model to make a prediction about a single image.
Step24: tf.keras models are optimized to make predictions on a batch, or collection, of examples at once. So even though we're using a single image, we need to add it to a list
Step25: Now predict the image
Step26: model.predict returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch
|
6,822
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
np.random.seed(0) # set seed for reproducible results
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: sawtooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Get the estimated sources
A_ = ica.mixing_ # Get estimated mixing matrix
# compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # estimate PCA sources
plt.figure(figsize=(9, 6))
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA estimated sources',
'PCA estimated sources']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now try to recover the sources
|
6,823
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
%config InlineBackend.figure_format = 'retina'
from scipy import stats
x = np.linspace(3.5, 4.0, 100)
loc = 3.7
scl = 0.02
y = stats.norm.pdf(x, loc=loc, scale=scl)
yalt = stats.norm.logpdf(x, loc=loc, scale=scl)
np.trapz(y, x)
plt.plot(x, np.log(y))
plt.xlabel('$\log{g}$')
plt.ylabel('$\ln{p}$')
plt.ylim(ymin=-20)
def lnprior_fn(self, p):
#For now just hardcode the location and scale parameters.
# log-g
loc = 3.7
scl = 0.1
lnprior_logg = stats.norm.logpdf(p.grid[1], loc=loc, scale=scl)
#Everything else will have a flat prior over the grid.
lnprior_allelse = 0.0
lnprior_out = lnprior_logg + lnprior_allelse
return lnprior_out
import h5py
!cp /Users/gully/GitHub/welter/sf/m086/output/LkCa4_sm086/run03/mc.hdf5 .
f = h5py.File('mc.hdf5', mode='r')
list(f.keys())
d = f['samples']
list(d.attrs)
d.attrs['acceptance']
f.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We want a a continuous prior
Step2: The normalization doesn't matter, but it's nice to know that it's close to normalized.
Step3: What do the chains look like when you use a prior?
Step4: Too small! Here's why
|
6,824
|
<ASSISTANT_TASK:>
Python Code:
from nltk.util import ngrams
from collections import defaultdict
from collections import OrderedDict
import string
import time
import gc
from math import log10
start_time = time.time()
#returns: string
#arg: string
#remove punctuations and make the string lowercase
def removePunctuations(sen):
#split the string into word tokens
temp_l = sen.split()
#print(temp_l)
i = 0
j = 0
#changes the word to lowercase and removes punctuations from it
for word in temp_l :
j = 0
#print(len(word))
for l in word :
if l in string.punctuation:
if l == "'":
if j+1<len(word) and word[j+1] == 's':
j = j + 1
continue
word = word.replace(l," ")
#print(j,word[j])
j += 1
temp_l[i] = word.lower()
i=i+1
#splitting is done here because in sentences like "here---so", after punctuation removal it should
#become "here so"
content = " ".join(temp_l)
return content
#returns : int
#arg: string,dict,dict,dict,dict
#loads the corpus for the dataset and makes the frequency count of quadgram ,bigram and trigram strings
def loadCorpus(file_path, bi_dict, tri_dict, quad_dict, vocab_dict):
w1 = '' #for storing the 3rd last word to be used for next token set
w2 = '' #for storing the 2nd last word to be used for next token set
w3 = '' #for storing the last word to be used for next token set
token = []
#total no. of words in the corpus
word_len = 0
#open the corpus file and read it line by line
with open(file_path,'r') as file:
for line in file:
#split the string into word tokens
temp_l = line.split()
i = 0
j = 0
#does the same as the removePunctuations() function; written inline here for performance reasons
#changes the word to lowercase and removes punctuations from it
for word in temp_l :
j = 0
#print(len(word))
for l in word :
if l in string.punctuation:
if l == "'":
if j+1<len(word) and word[j+1] == 's':
j = j + 1
continue
word = word.replace(l," ")
#print(j,word[j])
j += 1
temp_l[i] = word.lower()
i=i+1
#splitting is done here because in sentences like "here---so", after punctuation removal it should
#become "here so"
content = " ".join(temp_l)
token = content.split()
word_len = word_len + len(token)
if not token:
continue
#add the last word from previous line
if w3!= '':
token.insert(0,w3)
temp0 = list(ngrams(token,2))
#since we are reading line by line some combinations of word might get missed for pairing
#for trigram
#first add the previous words
if w2!= '':
token.insert(0,w2)
#tokens for trigrams
temp1 = list(ngrams(token,3))
#insert the 3rd last word from previous line for quadgram pairing
if w1!= '':
token.insert(0,w1)
#add new words to the vocabulary dict and update their frequencies
for word in token:
if word not in vocab_dict:
vocab_dict[word] = 1
else:
vocab_dict[word]+= 1
#tokens for quadgrams
temp2 = list(ngrams(token,4))
#count the frequency of the bigram sentences
for t in temp0:
sen = ' '.join(t)
bi_dict[sen] += 1
#count the frequency of the trigram sentences
for t in temp1:
sen = ' '.join(t)
tri_dict[sen] += 1
#count the frequency of the quadgram sentences
for t in temp2:
sen = ' '.join(t)
quad_dict[sen] += 1
#then take out the last 3 words
n = len(token)
#store the last few words for the next sentence pairing
w1 = token[n -3]
w2 = token[n -2]
w3 = token[n -1]
return word_len
#returns: void
#arg: dict,dict,dict,dict,dict,dict,int
#creates dict for storing probable words with their probabilities for a trigram sentence
def findQuadgramProbGT(vocab_dict, bi_dict, tri_dict, quad_dict, quad_prob_dict, nc_dict, k):
i = 0
V = len(vocab_dict)
for quad_sen in quad_dict:
quad_token = quad_sen.split()
#trigram sentence for key
tri_sen = ' '.join(quad_token[:3])
#find the probability
#Good Turing smoothing has been used
quad_count = quad_dict[quad_sen]
tri_count = tri_dict[tri_sen]
if quad_dict[quad_sen] <= k or (quad_sen not in quad_dict):
quad_count = findGoodTuringAdjustCount( quad_dict[quad_sen], k, nc_dict)
if tri_dict[tri_sen] <= k or (tri_sen not in tri_dict):
tri_count = findGoodTuringAdjustCount( tri_dict[tri_sen], k, nc_dict)
prob = quad_count / tri_count
#add the trigram to the quadgram probability dict
if tri_sen not in quad_prob_dict:
quad_prob_dict[tri_sen] = []
quad_prob_dict[tri_sen].append([prob,quad_token[-1]])
else:
quad_prob_dict[tri_sen].append([prob,quad_token[-1]])
prob = None
quad_token = None
tri_sen = None
#returns: void
#arg: dict,dict,dict,dict,dict,int
#creates dict for storing probable words with their probabilities for a bigram sentence
def findTrigramProbGT(vocab_dict, bi_dict, tri_dict, tri_prob_dict, nc_dict, k):
#vocabulary length
V = len(vocab_dict)
#create a dictionary of probable words with their probabilities for
#trigram probabilites,key is a bigram and value is a list of prob and word
for tri in tri_dict:
tri_token = tri.split()
#bigram sentence for key
bi_sen = ' '.join(tri_token[:2])
#find the probability
#Good Turing smoothing has been used
tri_count = tri_dict[tri]
bi_count = bi_dict[bi_sen]
if tri_dict[tri] <= k or (tri not in tri_dict):
tri_count = findGoodTuringAdjustCount( tri_dict[tri], k, nc_dict)
if bi_dict[bi_sen] <= k or (bi_sen not in bi_dict):
bi_count = findGoodTuringAdjustCount( bi_dict[bi_sen], k, nc_dict)
prob = tri_count / bi_count
#add the bigram sentence to the trigram probability dict
#tri_prob_dict is a dict of list
if bi_sen not in tri_prob_dict:
tri_prob_dict[bi_sen] = []
tri_prob_dict[bi_sen].append([prob,tri_token[-1]])
else:
tri_prob_dict[bi_sen].append([prob,tri_token[-1]])
prob = None
tri_token = None
bi_sen = None
#returns: void
#arg: dict,dict,dict,dict,int
#creates dict for storing probable words with their probabilities for a unigram
def findBigramProbGT(vocab_dict, bi_dict, bi_prob_dict, nc_dict, k):
#vocabulary size
V = len(vocab_dict)
#create a dictionary of probable words with their probabilities for bigram probabilites
for bi in bi_dict:
bi_token = bi.split()
#unigram for key
unigram = bi_token[0]
#find the probability
#Good Turing smoothing has been used
bi_count = bi_dict[bi]
uni_count = vocab_dict[unigram]
if bi_dict[bi] <= k or (bi not in bi_dict):
bi_count = findGoodTuringAdjustCount( bi_dict[bi], k, nc_dict)
if vocab_dict[unigram] <= k or (unigram not in vocab_dict):
uni_count = findGoodTuringAdjustCount( vocab_dict[unigram], k, nc_dict)
prob = bi_count / uni_count
#add the unigram to the bigram probability dict
#bi_prob_dict is a dict of list
if unigram not in bi_prob_dict:
bi_prob_dict[unigram] = []
bi_prob_dict[unigram].append([prob,bi_token[-1]])
else:
bi_prob_dict[unigram].append([prob,bi_token[-1]])
prob = None
bi_token = None
unigram = None
#returns: void
#arg: dict
#for sorting the probable words according to their probabilities
def sortProbWordDict(bi_prob_dict, tri_prob_dict, quad_prob_dict):
for key in bi_prob_dict:
if len(bi_prob_dict[key])>1:
bi_prob_dict[key] = sorted(bi_prob_dict[key],reverse = True)
for key in tri_prob_dict:
if len(tri_prob_dict[key])>1:
tri_prob_dict[key] = sorted(tri_prob_dict[key],reverse = True)
for key in quad_prob_dict:
if len(quad_prob_dict[key])>1:
quad_prob_dict[key] = sorted(quad_prob_dict[key],reverse = True)[:2]
#returns: string
#arg: void
#for taking input from user
def takeInput():
cond = False
#take input
while(cond == False):
sen = input('Enter the string\n')
sen = removePunctuations(sen)
temp = sen.split()
if len(temp) < 3:
print("Please enter atleast 3 words !")
else:
cond = True
temp = temp[-3:]
sen = " ".join(temp)
return sen
#computes the score for test data
def computeTestScore(test_token, bi_dict, tri_dict, quad_dict,
quad_prob_dict, tri_prob_dict,bi_prob_dict ):
#increment the score value if correct prediction is made else decrement its value
score = 0
wrong = 0
total = 0
with open('Test_Scores/Good_Turing_Backoff_Score.txt','w') as w:
for sent in test_token:
sen_token = sent[:3]
sen = " ".join(sen_token)
correct_word = sent[3]
result = doPredictionBackoffGT(sen, bi_dict, tri_dict, quad_dict, bi_prob_dict, tri_prob_dict,
quad_prob_dict)
if result:
if result[1] == correct_word:
score+=1
else:
wrong += 1
else:
wrong += 1
total += 1
w.write('Total Word Predictions: '+str(total) + '\n' +'Correct Predictions: '+str(score) +
'\n'+'Wrong Predictions: '+str(wrong) + '\n'+'ACCURACY: '+str((score/total)*100)+'%' )
#print stats
print('Total Word Predictions: '+str(total) + '\n' +'Correct Predictions: '+str(score) +
'\n'+'Wrong Predictions: '+str(wrong) + '\n'+'ACCURACY: '+str((score/total)*100)+'%' )
return score
#return:float
#arg:list,int,dict,dict,dict,dict
#computes the perplexity of the model
def computePerplexity(test_quadgrams, bi_dict, tri_dict, quad_dict,
vocab_dict,token_len, k, quad_nc_dict, tri_nc_dict,
bi_nc_dict, uni_nc_dict):
perplexity = float(1.0)
n = token_len
for key in quad_dict:
quad_token = key.split()
quad_count = quad_dict[key]
tri_count = tri_dict[' '.join(quad_token[0:3])]
if quad_dict[key] <= k or (key not in quad_dict):
quad_count = findGoodTuringAdjustCount( quad_dict[key], k, quad_nc_dict)
if tri_dict[' '.join(quad_token[0:3])] <= k or (' '.join(quad_token[0:3]) not in tri_dict):
tri_count = findGoodTuringAdjustCount( tri_dict[' '.join(quad_token[0:3])], k, tri_nc_dict)
prob = quad_count / tri_count
if prob != 0:
perplexity = perplexity * ( prob**(1./n))
with open('Test_Scores/Good_Turing_Backoff_Score.txt','a') as w:
w.write('\nPerplexity: '+str(perplexity))
return perplexity
## Regression related stuff
#calculate best fit line for simple regression
from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
#finds the slope for the best fit line
def findBestFitSlope(x,y):
m = (( mean(x)*mean(y) - mean(x*y) ) /
( mean(x)** 2 - mean(x**2)))
return m
#finds the intercept for the best fit line
def findBestFitIntercept(x,y,m):
c = mean(y) - m*mean(x)
return c
## Find the count Nc for quadgrams and trigrams where c > 5
#arg: dict, int, int, int, int
#returns: dict
#token_len : total no. of ngram tokens
def findFrequencyOfFrequencyCount(ngram_dict, k, n, V, token_len):
#for keeping count of 'c' value i.e Nc
nc_dict = {}
#we find the value of Nc,c = 0 by V^n - (total n-gram tokens)
nc_dict[0] = V**n - token_len
#find the count Nc till c = k,we will take k = 5
#find counts for n-gram
for key in ngram_dict:
if ngram_dict[key] <= k + 1:
if ngram_dict[key] not in nc_dict:
nc_dict[ ngram_dict[key]] = 1
else:
nc_dict[ ngram_dict[key] ] += 1
#check if all the values of Nc are there in the nc_dict or not ,if there then return
val_present = True
for i in range(1,7):
if i not in nc_dict:
val_present = False
break
if val_present == True:
return nc_dict
#now fill in the values of nc in case it is not there using regression upto c = 6
#we use :[ log(Nc) = blog(c) + a ] as the equation
#we first need to find data for regression that is values(Nc,c) we take 5 data points
data_pts = {}
i = 0
#get first 5 counts value i.e c
#for quadgram
for key in ngram_dict:
if ngram_dict[key] not in data_pts:
data_pts[ ngram_dict[key] ] = 1
i += 1
if i >5:
break
#now get Nc for those c values
for key in ngram_dict:
if ngram_dict[key] in data_pts:
data_pts[ ngram_dict[key] ] += 1
#make x ,y coordinates for regression
x_coor = [ np.log(item) for item in data_pts ]
y_coor = [ np.log( data_pts[item] ) for item in data_pts ]
x = np.array(x_coor, dtype = np.float64)
y = np.array(y_coor , dtype = np.float64)
#now do regression
#find the slope and intercept for the regression equation
slope_m = findBestFitSlope(x,y)
intercept_c = findBestFitIntercept(x,y,slope_m)
#now find the missing Nc terms and give them value using regression
for i in range(1,(k+2)):
if i not in nc_dict:
nc_dict[i] = (slope_m*i) + intercept_c
return nc_dict
#for finding the adjusted count c* in Good Turing Smoothing
def findGoodTuringAdjustCount(c, k, nc_dict):
adjust_count = ( ( (( c + 1)*( nc_dict[c + 1] / nc_dict[c])) - ( c * (k+1) * nc_dict[k+1] / nc_dict[1]) ) /
( 1 - (( k + 1)*nc_dict[k + 1] / nc_dict[1]) )
)
return adjust_count
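# Worked example for the adjusted count (illustrative only -- the Nc values below are made up
# for the sake of the arithmetic; they are not taken from the corpus):
# with k = 5 and example_nc = {1: 100, 2: 50, 3: 30, 4: 20, 5: 10, 6: 5},
# an n-gram seen c = 2 times gets
#   c* = (3*(30/50) - 2*6*(5/100)) / (1 - 6*(5/100)) = (1.8 - 0.6) / 0.7 ≈ 1.71
# i.e. findGoodTuringAdjustCount(2, 5, example_nc) would return roughly 1.71.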
#finds the word prediction using backoff
def doPredictionBackoffGT(input_sen, bi_dict, tri_dict, quad_dict, bi_prob_dict, tri_prob_dict, quad_prob_dict):
#split the input sentence into tokens
token = input_sen.split()
#if the input sen is found in any ngram then give the most probable word for that ngram
#if not then go to the lower order ngram
if input_sen in quad_prob_dict and quad_prob_dict[ input_sen ][0][0]>0:
pred = quad_prob_dict[input_sen][0]
elif ' '.join(token[1:]) in tri_prob_dict and tri_prob_dict[' '.join(token[1:])][0][0]>0:
pred = tri_prob_dict[ ' '.join(token[1:]) ][0]
elif ' '.join(token[2:]) in bi_prob_dict and bi_prob_dict[ ' '.join(token[2:]) ][0][0]>0:
pred = bi_prob_dict[' '.join(token[2:])][0]
else:
pred = []
return pred
#return: void
#arg:string,string,dict,dict,dict,dict,dict
#Used for testing the Language Model
def trainCorpus(train_file,test_file,bi_dict,tri_dict,quad_dict,vocab_dict,prob_dict):
test_result = ''
score = 0
#load the training corpus for the dataset
token_len = loadCorpus(train_file, bi_dict, tri_dict, quad_dict, vocab_dict)
print("---Processing Time for Corpus Loading: %s seconds ---" % (time.time() - start_time))
start_time1 = time.time()
#create the different Nc dictionaries for ngrams
#threshold value
k = 5
V = len(vocab_dict)
quad_nc_dict = findFrequencyOfFrequencyCount(quad_dict, k, 4, V, len(quad_dict))
tri_nc_dict = findFrequencyOfFrequencyCount(tri_dict, k, 3, V, len(tri_dict))
bi_nc_dict = findFrequencyOfFrequencyCount(bi_dict, k, 2, V, len(bi_dict))
uni_nc_dict = findFrequencyOfFrequencyCount(vocab_dict, k, 1, V, len(vocab_dict))
#create quadgram probability dictionary
findQuadgramProbGT(vocab_dict, bi_dict, tri_dict, quad_dict, quad_prob_dict, quad_nc_dict, k)
#create trigram probability dictionary
findTrigramProbGT(vocab_dict, bi_dict, tri_dict, tri_prob_dict, tri_nc_dict, k)
#create bigram probability dictionary
findBigramProbGT(vocab_dict, bi_dict, bi_prob_dict, bi_nc_dict, k)
#sort the probability dictionaries of quad,tri and bi grams
sortProbWordDict(bi_prob_dict, tri_prob_dict, quad_prob_dict)
print("---Processing Time for Creating Probable Word Dict: %s seconds ---" % (time.time() - start_time1))
### TESTING WITH TEST CORPUS
test_data = ''
#Now load the test corpus
with open(test_file,'r') as file:
test_data = file.read()
#remove punctuations from the test data
test_data = removePunctuations(test_data)
test_token = test_data.split()
#split the test data into lists of 4 words
test_quadgrams = list(ngrams(test_token,4))
#choose most probable words for prediction
start_time2 = time.time()
score = computeTestScore(test_quadgrams, bi_dict, tri_dict, quad_dict,
quad_prob_dict, tri_prob_dict,bi_prob_dict )
print('Score:',score)
print("---Processing Time for computing score: %s seconds ---" % (time.time() - start_time2))
start_time3 = time.time()
perplexity = computePerplexity(test_quadgrams, bi_dict, tri_dict, quad_dict,
vocab_dict,token_len, k, quad_nc_dict, tri_nc_dict,
bi_nc_dict, uni_nc_dict)
print('Perplexity:',perplexity)
print("---Processing Time for computing Perplexity: %s seconds ---" % (time.time() - start_time3))
def main():
#variable declaration
vocab_dict = defaultdict(int) #for storing the different words with their frequencies
bi_dict = defaultdict(int) #for keeping count of sentences of two words
tri_dict = defaultdict(int) #for keeping count of sentences of three words
quad_dict = defaultdict(int) #for keeping count of sentences of four words
quad_prob_dict = OrderedDict()
tri_prob_dict = OrderedDict()
bi_prob_dict = OrderedDict()
#load the corpus for the dataset
train_file = 'corpusfile.txt'
#load corpus
token_len = loadCorpus(train_file, bi_dict, tri_dict, quad_dict, vocab_dict)
#create the different Nc dictionaries for ngrams
#threshold value
k = 5
V = len(vocab_dict)
quad_nc_dict = findFrequencyOfFrequencyCount(quad_dict, k, 4, V, len(quad_dict))
tri_nc_dict = findFrequencyOfFrequencyCount(tri_dict, k, 3, V, len(tri_dict))
bi_nc_dict = findFrequencyOfFrequencyCount(bi_dict, k, 2, V, len(bi_dict))
uni_nc_dict = findFrequencyOfFrequencyCount(vocab_dict, k, 1, V, len(vocab_dict))
#create quadgram probability dictionary
findQuadgramProbGT(vocab_dict, bi_dict, tri_dict, quad_dict, quad_prob_dict, quad_nc_dict, k)
#create trigram probability dictionary
findTrigramProbGT(vocab_dict, bi_dict, tri_dict, tri_prob_dict, tri_nc_dict, k)
#create bigram probability dictionary
findBigramProbGT(vocab_dict, bi_dict, bi_prob_dict, bi_nc_dict, k)
#sort the probability dictionaries of quad,tri and bi grams
sortProbWordDict(bi_prob_dict, tri_prob_dict, quad_prob_dict)
##WORD PREDICTION
#take user input
input_sen = takeInput()
prediction = doPredictionBackoffGT(input_sen, bi_dict, tri_dict, quad_dict, bi_prob_dict, tri_prob_dict, quad_prob_dict)
if prediction:
print('Word Prediction:',prediction[1])
if __name__ == '__main__':
main()
#variable declaration
vocab_dict = defaultdict(int) #for storing the different words with their frequencies
bi_dict = defaultdict(int) #for keeping count of sentences of two words
tri_dict = defaultdict(int) #for keeping count of sentences of three words
quad_dict = defaultdict(int) #for keeping count of sentences of four words
quad_prob_dict = OrderedDict()
tri_prob_dict = OrderedDict()
bi_prob_dict = OrderedDict()
#load the corpus for the dataset
#loadCorpus('corpusfile.txt',bi_dict,tri_dict,quad_dict,vocab_dict)
print("---Preprocessing Time for Corpus loading: %s seconds ---" % (time.time() - start_time))
train_file = 'training_corpus.txt'
test_file = 'test_corpus.txt'
#load the corpus for the dataset
token_len = trainCorpus(train_file,test_file,bi_dict,tri_dict,quad_dict,vocab_dict,quad_prob_dict)
train_file = 'corpusfile.txt'
#load corpus
token_len = loadCorpus(train_file, bi_dict, tri_dict, quad_dict, vocab_dict)
#create the different Nc dictionaries for ngrams
#threshold value
k = 5
V = len(vocab_dict)
quad_nc_dict = findFrequencyOfFrequencyCount(quad_dict, k, 4, V, len(quad_dict))
tri_nc_dict = findFrequencyOfFrequencyCount(tri_dict, k, 3, V, len(tri_dict))
bi_nc_dict = findFrequencyOfFrequencyCount(bi_dict, k, 2, V, len(bi_dict))
uni_nc_dict = findFrequencyOfFrequencyCount(vocab_dict, k, 1, V, len(vocab_dict))
#create quadgram probability dictionary
findQuadgramProbGT(vocab_dict, bi_dict, tri_dict, quad_dict, quad_prob_dict, quad_nc_dict, k)
#create trigram probability dictionary
findTrigramProbGT(vocab_dict, bi_dict, tri_dict, tri_prob_dict, tri_nc_dict, k)
#create bigram probability dictionary
findBigramProbGT(vocab_dict, bi_dict, bi_prob_dict, bi_nc_dict, k)
#sort the probability dictionaries of quad,tri and bi grams
sortProbWordDict(bi_prob_dict, tri_prob_dict, quad_prob_dict)
#FOR DEBUGGING ONLY
#writeProbDicts is defined elsewhere in the original notebook and is not included here,
#so the call is commented out to keep this script runnable:
#writeProbDicts(bi_prob_dict, tri_prob_dict, quad_prob_dict)
##WORD PREDICTION
start_time2 = time.time()
#take user input
input_sen = takeInput()
prediction = doPredictionBackoffGT(input_sen, bi_dict, tri_dict, quad_dict, bi_prob_dict, tri_prob_dict, quad_prob_dict)
if prediction:
print('Word Prediction:',prediction[1])
print("---Time for Prediction Operation: %s seconds ---" % (time.time() - start_time2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <u>Do preprocessing</u>
Step2: Tokenize and load the corpus data
Step3: Create a Hash Table for Probable words for Trigram sentences
Step4: Create a Hash Table for Probable words for Bigram sentences
Step5: Create a Hash Table for Probable words for Unigram
Step6: Sort the probable words for the various Probability Dictionaries according to their probability
Step7: <u>For Taking input from the User</u>
Step8: <u>Test Score, Perplexity Calculation
Step9: For Computing the Perplexity
Step10: <u>Regression related stuff
Step11: <u>Find the count Nc for quadgrams and trigrams where c > k , k = 5
Step12: <u>For finding the Good Turing Probability
Step13: <u>Driver function for doing the prediction</u>
Step14: <u>Driver Function for Testing the Language Model</u>
Step15: main function
Step16: <i><u>For Debugging Purpose Only</u></i>
Step17: For Testing the Language Model
|
6,825
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
print("TensorFlow version: ", tf.version.VERSION)
# This will be an int32 tensor by default; see "dtypes" below.
rank_0_tensor = tf.constant(4)
print(rank_0_tensor)
# Let's make this a float tensor.
rank_1_tensor = tf.constant([2.0, 3.0, 4.0])
print(rank_1_tensor)
# If we want to be specific, we can set the dtype (see below) at creation time
# TODO 1a
rank_2_tensor = tf.constant([[1, 2], [3, 4], [5, 6]], dtype=tf.float16)
print(rank_2_tensor)
# There can be an arbitrary number of
# axes (sometimes called "dimensions")
rank_3_tensor = tf.constant(
[
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
[[10, 11, 12, 13, 14], [15, 16, 17, 18, 19]],
[[20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
]
)
print(rank_3_tensor)
# TODO 1b
np.array(rank_2_tensor)
# TODO 1c
rank_2_tensor.numpy()
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[1, 1], [1, 1]]) # Could have also said `tf.ones([2,2])`
print(tf.add(a, b), "\n")
print(tf.multiply(a, b), "\n")
print(tf.matmul(a, b), "\n")
print(a + b, "\n") # element-wise addition
print(a * b, "\n") # element-wise multiplication
print(a @ b, "\n") # matrix multiplication
c = tf.constant([[4.0, 5.0], [10.0, 1.0]])
# Find the largest value
print(tf.reduce_max(c))
# TODO 1d
# Find the index of the largest value
print(tf.argmax(c))
# Compute the softmax
print(tf.nn.softmax(c))
rank_4_tensor = tf.zeros([3, 2, 4, 5])
print("Type of every element:", rank_4_tensor.dtype)
print("Number of dimensions:", rank_4_tensor.ndim)
print("Shape of tensor:", rank_4_tensor.shape)
print("Elements along axis 0 of tensor:", rank_4_tensor.shape[0])
print("Elements along the last axis of tensor:", rank_4_tensor.shape[-1])
print("Total number of elements (3*2*4*5): ", tf.size(rank_4_tensor).numpy())
rank_1_tensor = tf.constant([0, 1, 1, 2, 3, 5, 8, 13, 21, 34])
print(rank_1_tensor.numpy())
print("First:", rank_1_tensor[0].numpy())
print("Second:", rank_1_tensor[1].numpy())
print("Last:", rank_1_tensor[-1].numpy())
print("Everything:", rank_1_tensor[:].numpy())
print("Before 4:", rank_1_tensor[:4].numpy())
print("From 4 to the end:", rank_1_tensor[4:].numpy())
print("From 2, before 7:", rank_1_tensor[2:7].numpy())
print("Every other item:", rank_1_tensor[::2].numpy())
print("Reversed:", rank_1_tensor[::-1].numpy())
print(rank_2_tensor.numpy())
# Pull out a single value from a 2-rank tensor
print(rank_2_tensor[1, 1].numpy())
# Get row and column tensors
print("Second row:", rank_2_tensor[1, :].numpy())
print("Second column:", rank_2_tensor[:, 1].numpy())
print("Last row:", rank_2_tensor[-1, :].numpy())
print("First item in last column:", rank_2_tensor[0, -1].numpy())
print("Skip the first row:")
print(rank_2_tensor[1:, :].numpy(), "\n")
print(rank_3_tensor[:, :, 4])
# Shape returns a `TensorShape` object that shows the size on each dimension
var_x = tf.Variable(tf.constant([[1], [2], [3]]))
print(var_x.shape)
# You can convert this object into a Python list, too
print(var_x.shape.as_list())
# TODO 2a
# We can reshape a tensor to a new shape.
# Note that we're passing in a list
reshaped = tf.reshape(var_x, [1, 3])
print(var_x.shape)
print(reshaped.shape)
print(rank_3_tensor)
# A `-1` passed in the `shape` argument says "Whatever fits".
print(tf.reshape(rank_3_tensor, [-1]))
print(tf.reshape(rank_3_tensor, [3 * 2, 5]), "\n")
print(tf.reshape(rank_3_tensor, [3, -1]))
# Bad examples: don't do this
# You can't reorder axes with reshape.
print(tf.reshape(rank_3_tensor, [2, 3, 5]), "\n")
# This is a mess
print(tf.reshape(rank_3_tensor, [5, 6]), "\n")
# This doesn't work at all
try:
tf.reshape(rank_3_tensor, [7, -1])
except Exception as e:
print(e)
# TODO 2b
the_f64_tensor = tf.constant([2.2, 3.3, 4.4], dtype=tf.float64)
the_f16_tensor = tf.cast(the_f64_tensor, dtype=tf.float16)
# Now, let's cast to an uint8 and lose the decimal precision
the_u8_tensor = tf.cast(the_f16_tensor, dtype=tf.uint8)
print(the_u8_tensor)
x = tf.constant([1, 2, 3])
y = tf.constant(2)
z = tf.constant([2, 2, 2])
# All of these are the same computation
print(tf.multiply(x, 2))
print(x * y)
print(x * z)
# These are the same computations
x = tf.reshape(x, [3, 1])
y = tf.range(1, 5)
print(x, "\n")
print(y, "\n")
print(tf.multiply(x, y))
x_stretch = tf.constant([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]])
y_stretch = tf.constant([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
print(x_stretch * y_stretch) # Again, operator overloading
print(tf.broadcast_to(tf.constant([1, 2, 3]), [3, 3]))
ragged_list = [[0, 1, 2, 3], [4, 5], [6, 7, 8], [9]]
try:
tensor = tf.constant(ragged_list)
except Exception as e:
print(e)
# TODO 2c
ragged_tensor = tf.ragged.constant(ragged_list)
print(ragged_tensor)
print(ragged_tensor.shape)
# Tensors can be strings, too here is a scalar string.
scalar_string_tensor = tf.constant("Gray wolf")
print(scalar_string_tensor)
# If we have two string tensors of different lengths, this is OK.
tensor_of_strings = tf.constant(["Gray wolf", "Quick brown fox", "Lazy dog"])
# Note that the shape is (3,); the length of the strings is not part of the shape.
print(tensor_of_strings)
tf.constant("🥳👍")
# We can use split to split a string into a set of tensors
print(tf.strings.split(scalar_string_tensor, sep=" "))
# ...but it turns into a `RaggedTensor` if we split up a tensor of strings,
# as each string might be split into a different number of parts.
print(tf.strings.split(tensor_of_strings))
text = tf.constant("1 10 100")
print(tf.strings.to_number(tf.strings.split(text, " ")))
byte_strings = tf.strings.bytes_split(tf.constant("Duck"))
byte_ints = tf.io.decode_raw(tf.constant("Duck"), tf.uint8)
print("Byte strings:", byte_strings)
print("Bytes:", byte_ints)
# Or split it up as unicode and then decode it
unicode_bytes = tf.constant("アヒル 🦆")
unicode_char_bytes = tf.strings.unicode_split(unicode_bytes, "UTF-8")
unicode_values = tf.strings.unicode_decode(unicode_bytes, "UTF-8")
print("\nUnicode bytes:", unicode_bytes)
print("\nUnicode chars:", unicode_char_bytes)
print("\nUnicode values:", unicode_values)
# Sparse tensors store values by index in a memory-efficient manner
# TODO 2d
sparse_tensor = tf.sparse.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]
)
print(sparse_tensor, "\n")
# We can convert sparse tensors to dense
print(tf.sparse.to_dense(sparse_tensor))
import tensorflow as tf
# Uncomment to see where your variables get placed (see below)
# tf.debugging.set_log_device_placement(True)
# TODO 3a
my_tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
my_variable = tf.Variable(my_tensor)
# Variables can be all kinds of types, just like tensors
bool_variable = tf.Variable([False, False, False, True])
complex_variable = tf.Variable([5 + 4j, 6 + 1j])
print("Shape: ", my_variable.shape)
print("DType: ", my_variable.dtype)
print("As NumPy: ", my_variable.numpy)
print("A variable:", my_variable)
print("\nViewed as a tensor:", tf.convert_to_tensor(my_variable))
print("\nIndex of highest value:", tf.argmax(my_variable))
# This creates a new tensor; it does not reshape the variable.
print("\nCopying and reshaping: ", tf.reshape(my_variable, ([1, 4])))
a = tf.Variable([2.0, 3.0])
# This will keep the same dtype, float32
a.assign([1, 2])
# Not allowed as it resizes the variable:
try:
a.assign([1.0, 2.0, 3.0])
except Exception as e:
print(e)
a = tf.Variable([2.0, 3.0])
# Create b based on the value of a
b = tf.Variable(a)
a.assign([5, 6])
# a and b are different
print(a.numpy())
print(b.numpy())
# There are other versions of assign
print(a.assign_add([2, 3]).numpy()) # [7. 9.]
print(a.assign_sub([7, 9]).numpy()) # [0. 0.]
# Create a and b; they have the same value but are backed by different tensors.
a = tf.Variable(my_tensor, name="Mark")
# A new variable with the same name, but different value
# Note that the scalar add is broadcast
b = tf.Variable(my_tensor + 1, name="Mark")
# These are elementwise-unequal, despite having the same name
print(a == b)
step_counter = tf.Variable(1, trainable=False)
with tf.device("CPU:0"):
# Create some tensors
a = tf.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
c = tf.matmul(a, b)
print(c)
with tf.device("CPU:0"):
a = tf.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.Variable([[1.0, 2.0, 3.0]])
with tf.device("GPU:0"):
# Element-wise multiply
k = a * b
print(k)
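# A small extra sketch (not part of the original guide): assign_add can be used to
# accumulate a running total in place, reusing the variable's backing tensor.
total = tf.Variable(0.0)
for v in [1.0, 2.0, 3.0]:
    total.assign_add(v)
print(total.numpy())  # 6.0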
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lab Task 1
Step2: A "vector" or "rank-1" tensor is like a list of values. A vector has 1-axis
Step3: A "matrix" or "rank-2" tensor has 2-axes
Step4: <table>
Step5: There are many ways you might visualize a tensor with more than 2-axes.
Step6: Tensors often contain floats and ints, but have many other types, including
Step7: Tensors are used in all kinds of operations (ops).
Step8: About shapes
Step9: <table>
Step10: While axes are often referred to by their indices, you should always keep track of the meaning of each. Often axes are ordered from global to local
Step11: Indexing with a scalar removes the dimension
Step12: Indexing with a
Step13: Multi-axis indexing
Step14: Passing an integer for each index the result is a scalar.
Step15: You can index using any combination integers and slices
Step16: Here is an example with a 3-axis tensor
Step17: <table>
Step18: You can reshape a tensor into a new shape. Reshaping is fast and cheap as the underlying data does not need to be duplicated.
Step19: The data maintains it's layout in memory and a new tensor is created, with the requested shape, pointing to the same data. TensorFlow uses C-style "row-major" memory ordering, where incrementing the right-most index corresponds to a single step in memory.
Step20: If you flatten a tensor you can see what order it is laid out in memory.
Step21: Typically the only reasonable uses of tf.reshape are to combine or split adjacent axes (or add/remove 1s).
Step22: <table>
Step23: <table>
Step24: Broadcasting
Step25: Likewise, 1-sized dimensions can be stretched out to match the other arguments. Both arguments can be stretched in the same computation.
Step26: <table>
Step27: Most of the time, broadcasting is both time and space efficient, as the broadcast operation never materializes the expanded tensors in memory.
Step28: Unlike a mathematical op, for example, broadcast_to does nothing special to save memory. Here, you are materializing the tensor.
Step29: Instead create a tf.RaggedTensor using tf.ragged.constant
Step30: The shape of a tf.RaggedTensor contains unknown dimensions
Step31: String tensors
Step32: And a vector of strings
Step33: In the above printout the b prefix indicates that tf.string dtype is not a unicode string, but a byte-string. See the Unicode Tutorial for more about working with unicode text in TensorFlow.
Step34: Some basic functions with strings can be found in tf.strings, including tf.strings.split.
Step35: <table>
Step36: Although you can't use tf.cast to turn a string tensor into numbers, you can convert it into bytes, and then into numbers.
Step37: The tf.string dtype is used for all raw bytes data in TensorFlow. The tf.io module contains functions for converting data to and from bytes, including decoding images and parsing csv.
Step38: Lab Task 3
Step39: Create a variable
Step40: A variable looks and acts like a tensor, and, in fact, is a data structure backed by a tf.Tensor. Like tensors, they have a dtype and a shape, and can be exported to NumPy.
Step41: Most tensor operations work on variables as expected, although variables cannot be reshaped.
Step42: As noted above, variables are backed by tensors. You can reassign the tensor using tf.Variable.assign. Calling assign does not (usually) allocate a new tensor; instead, the existing tensor's memory is reused.
Step43: If you use a variable like a tensor in operations, you will usually operate on the backing tensor.
Step44: Lifecycles, naming, and watching
Step45: Variable names are preserved when saving and loading models. By default, variables in models will acquire unique variable names automatically, so you don't need to assign them yourself unless you want to.
Step46: Placing variables and tensors
Step47: It's possible to set the location of a variable or tensor on one device and do the computation on another device. This will introduce delay, as data needs to be copied between the devices.
|
6,826
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
n_samples, n_features = X.shape
plt.scatter(X[:, 0], X[:, 1], c=y);
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import dendrogram
clusters = linkage(X,
metric='euclidean',
method='complete')
dendr = dendrogram(clusters)
plt.ylabel('Euclidean Distance');
from sklearn.cluster import AgglomerativeClustering
ac = AgglomerativeClustering(n_clusters=3,
affinity='euclidean',
linkage='complete')
prediction = ac.fit_predict(X)
print('Cluster labels: %s\n' % prediction)
plt.scatter(X[:, 0], X[:, 1], c=prediction);
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=400,
noise=0.1,
random_state=1)
plt.scatter(X[:,0], X[:,1])
plt.show()
from sklearn.cluster import DBSCAN
db = DBSCAN(eps=0.2,
min_samples=10,
metric='euclidean')
prediction = db.fit_predict(X)
print("Predicted labels:\n", prediction)
plt.scatter(X[:, 0], X[:, 1], c=prediction);
from sklearn.datasets import make_circles
X, y = make_circles(n_samples=1500,
factor=.4,
noise=.05)
plt.scatter(X[:, 0], X[:, 1], c=y);
# %load solutions/20_clustering_comparison.py
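# A possible comparison sketch (an illustrative guess, not the contents of the solutions file
# loaded above): run KMeans, complete-linkage agglomerative clustering and DBSCAN on the
# circles data and plot the resulting labels side by side.
from sklearn.cluster import KMeans
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
estimators = [KMeans(n_clusters=2),
              AgglomerativeClustering(n_clusters=2, linkage='complete'),
              DBSCAN(eps=0.2, min_samples=5)]
for ax, est in zip(axes, estimators):
    labels = est.fit_predict(X)
    ax.scatter(X[:, 0], X[:, 1], c=labels)
    ax.set_title(type(est).__name__)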
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we start with some exploratory clustering, visualizing the clustering dendrogram using SciPy's linkage and dendrogram functions
Step2: Next, let's use the AgglomerativeClustering estimator from scikit-learn and divide the dataset into 3 clusters. Can you guess which 3 clusters from the dendrogram it will reproduce?
Step3: Density-based Clustering - DBSCAN
Step4: Exercise
|
6,827
|
<ASSISTANT_TASK:>
Python Code:
s = specdal.Spectrum(filepath="/home/young/data/specdal/aidan_data/SVC/ACPA_F_B_SU_20160617_003.sig")
print(s)
print(type(s.measurement))
print(s.measurement.head())
s.interpolate(method='linear')
print(s.measurement.head())
s.plot()
plt.show()
s.stitch(method='mean')
s.plot()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The print output shows the four components of the Spectrum object. For example, we can access the measurements as follows.
Step2: Spectrum object provides several methods for processing the measurements. Let's start by linearly resampling to the nearest integer (nm) wavelengths.
Step3: We can visualize the spectrum using pyplot. spectrum.plot is just a wrapper around spectrum.measurements.plot, so you can pass any arguments for plotting pandas.Series objects.
Step4: There are folds in the spectrum near 1000 and 1900 wavelengths. This happens because the three bands in the spectrometer has overlapping wavelengths. We can fix this using the stitch method of the Spectrum class.
|
6,828
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Martin Billinger <martin.billinger@tugraz.at>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, pick_types, find_events
from mne.channels import read_layout
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
print(__doc__)
# #############################################################################
# # Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14] # motor imagery: hands vs feet
raw_fnames = eegbci.load_data(subject, runs)
raw_files = [read_raw_edf(f, preload=True) for f in raw_fnames]
raw = concatenate_raws(raw_files)
# strip channel names of "." characters
raw.rename_channels(lambda x: x.strip('.'))
# Apply band-pass filter
raw.filter(7., 30.)
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2
from sklearn.lda import LDA # noqa
from sklearn.cross_validation import ShuffleSplit # noqa
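# Note (an environment assumption, not part of the original example): on recent scikit-learn
# versions these modules no longer exist; the usual replacements are
#   from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
#   from sklearn.model_selection import ShuffleSplit, cross_val_score
# (ShuffleSplit then takes n_splits instead of the number of samples.)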
# Assemble a classifier
lda = LDA()
csp = CSP(n_components=4, reg=None, log=True)
# Define a monte-carlo cross-validation generator (reduce variance):
cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
# Use scikit-learn Pipeline with cross_val_score function
from sklearn.pipeline import Pipeline # noqa
from sklearn.cross_validation import cross_val_score # noqa
clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
evoked = epochs.average()
evoked.data = csp.patterns_.T
evoked.times = np.arange(evoked.data.shape[0])
layout = read_layout('EEG1005')
evoked.plot_topomap(times=[0, 1, 2, 3, 4, 5], ch_type='eeg', layout=layout,
scale_time=1, time_format='%i', scale=1,
unit='Patterns (AU)', size=1.5)
sfreq = raw.info['sfreq']
w_length = int(sfreq * 0.5) # running classifier: window length
w_step = int(sfreq * 0.1) # running classifier: window step size
w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)
scores_windows = []
for train_idx, test_idx in cv:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)
X_test = csp.transform(epochs_data_train[test_idx])
# fit classifier
lda.fit(X_train, y_train)
# running classifier: test classifier on sliding window
score_this_window = []
for n in w_start:
X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])
score_this_window.append(lda.score(X_test, y_test))
scores_windows.append(score_this_window)
# Plot scores over time
w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
plt.figure()
plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
plt.axvline(0, linestyle='--', color='k', label='Onset')
plt.axhline(0.5, linestyle='-', color='k', label='Chance')
plt.xlabel('time (s)')
plt.ylabel('classification accuracy')
plt.title('Classification score over time')
plt.legend(loc='lower right')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classification with linear discriminant analysis
Step2: Look at performance over time
|
6,829
|
<ASSISTANT_TASK:>
Python Code:
# Step by Step version
def search(aList, target):
for v in aList:
if target == v:
return True
return False
# Recursive approach
def searchRecursive(aList, target):
if len(aList) == 0:
return False
if aList[0] == target:
return True
return searchRecursive(aList[1:], target)
max_val = 1000
newList = range(max_val)
newTarget = max_val
%timeit search(aList = newList, target = max_val)
%timeit searchRecursive(aList = newList, target = max_val)
class LinkedNode:
def __init__(self, value):
self.value = value
self.next = None
def search(n, value):
# Base case
if n is None:
return False
# Action and recursive step
if n.value == value:
return True
return search(n.next, value)
def sumList(n):
# base case
if n is None:
return 0
# Action and recursive step
return n.value + sumList(n.next)
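# A small usage sketch (not in the original notebook): build the list 3 -> 1 -> 4 by hand
# and exercise both helpers.
head = LinkedNode(3)
head.next = LinkedNode(1)
head.next.next = LinkedNode(4)
print(search(head, 4))   # True
print(search(head, 7))   # False
print(sumList(head))     # 8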
class BinaryNode:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BinaryTree:
def __init__(self):
self.root = None
class BinaryNode:
# Don't initially know what the left and right nodes
# are
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def add(self, value):
if value <= self.value:
# add to left
self.left = self.addToSubTree(self.left, value)
elif value > self.value:
# add to right
self.right = self.addToSubTree(self.right, value)
def addToSubTree(self, parent, value):
if parent is None:
return BinaryNode(value)
parent.add(value)
return parent
def remove(self, value):
if value < self.value:
self.left = self.removeFromParent(self.left, value)
elif value > self.value:
self.right = self.removeFromParent(self.right, value)
else:
# what if left subtree is empty
if self.left is None:
return self.right
# find the largest value in the left subtree
child = self.left
while child.right:
child = child.right
# find the largest value in the left subtree
childKey = child.value
self.left = self.removeFromParent(self.left, childKey)
self.value = childKey
return self
def removeFromParent(self, parent, value):
if parent:
return parent.remove(value)
return None
class BinaryTree:
def __init__(self):
self.root = None
# add value to BT
def add(self, value):
if self.root == None:
self.root = BinaryNode(value)
else:
self.root.add(value)
# remove value from BT
def remove(self, value):
if self.root is not None:
self.root = self.root.remove(value)
def __contains__(self, target):
node = self.root
while node is not None:
if target < node.value:
node = node.left
elif target > node.value:
node = node.right
else:
return True
return False
b = BinaryTree()
b.root
b.add(7)
b.root
b.root.value
b.add(1)
b.root.value
b.root.right
1 in b
b = BinaryTree()
b.add(1)
b.root.value
1 in b
b.remove(1)
b
1 in b
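# A small extra sketch (not in the original notebook): an in-order traversal of a binary
# search tree visits the stored values in sorted order.
def inorder(node):
    if node is None:
        return []
    return inorder(node.left) + [node.value] + inorder(node.right)

tree = BinaryTree()
for v in [7, 3, 9, 1, 5]:
    tree.add(v)
print(inorder(tree.root))  # [1, 3, 5, 7, 9]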
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Comments
Step2: Implement the Binary Tree
|
6,830
|
<ASSISTANT_TASK:>
Python Code:
print "Hello", "World!"
print "Tips 3: Use \ to escape an characters like \""
print "Tips 4: Use \\n \n to make a newline character"
print '''Tips 5: Use three \' to
make
multiple
line
'''
time.sleep(0.5);
print "Too bad"
import time
time.sleep(0.5);
print "Now its work"
print "We delete the time object to unload it from memory"
del time
time.sleep(0.5);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The magic command (ipython specific)
|
6,831
|
<ASSISTANT_TASK:>
Python Code:
# Imports needed below (the original notebook's import cell was not included here, so these
# are re-added; the exact set is an assumption based on the names used in the code):
import re
import random
import yaml
import praw
import nltk
import pandas as pd
from nltk.corpus import cmudict
from curses.ascii import isdigit
d = cmudict.dict()
def readability_ease(num_sentences, num_words, num_syllables):
asl = num_words / num_sentences
asw = num_syllables / num_words
return(206.835 - (1.015 * asl) - (84.6 * asw))
def readability_ease_interpretation(x):
if 90 <= x:
res = "5th grade] "
res += "Very easy to read. Easily understood by an average 11-year-old student."
elif 80 <= x < 90:
res = "6th grade] "
res += "Easy to read. Conversational English for consumers."
elif 70 <= x < 80:
res = "7th grade] "
res += "Fairly easy to read."
elif 60 <= x < 70:
res = "8th & 9th grade] "
res += "Plain English. Easily understood by 13- to 15-year-old students."
elif 50 <= x < 60:
res = "10th to 12th grade] "
res += "Fairly difficult to read."
elif 30 <= x < 50:
res = "College] "
res += "Difficult to read."
elif 0 <= x < 30:
res = "College Graduate] "
res += "Very difficult to read. Best understood by university graduates."
print("[{:.1f}|{}".format(x, res))
def count_syllables(word):
w = word.lower()
if w in d:
return([len(list(y for y in x if isdigit(y[-1]))) for x in d[w]][0])
else:
return(-1)
def flesch_reading_ease(text, show_details = False):
## Preprocessing
text = text.lower()
sentences = nltk.tokenize.sent_tokenize(text)
words = nltk.wordpunct_tokenize(re.sub('[^a-zA-Z_ ]', '',text))
syllables = [count_syllables(word) for word in words]
## Count sentences, words, and syllables
## Skip words that do not exist in dictionary
num_sentences = len(sentences)
num_unknown_words = syllables.count(-1)
num_words = len(words) - num_unknown_words
num_syllables = sum([s for s in syllables if s > 0])
## Calculate
if (num_sentences == 0 or num_words == 0): return None
fre = readability_ease(num_sentences, num_words, num_syllables)
if show_details:
return {
"sentence" : num_sentences,
"word" : num_words,
"syllables" : num_syllables,
"unknown": num_unknown_words
}
return(fre)
text = "Hello world, how are you? I am great. Thank you for asking!"
flesch_reading_ease(text, show_details = True)
# Load my secrets from external file
with open("_api-reddit.yaml", 'r') as stream:
my_secret = yaml.load(stream)
client_id = my_secret['client_id']
client_secret = my_secret['client_secret']
# Create instance of Reddit
reddit = praw.Reddit(user_agent='Comment Extraction (by /u/USERNAME)',
client_id=client_id, client_secret=client_secret)
def parse_reddit(reddit, the_url):
submission = reddit.submission(url=the_url)
# Replace MoreComments
submission.comments.replace_more(limit=0)
data = []
for comment in submission.comments.list():
data.append(comment.body)
return(data)
the_url = 'https://www.reddit.com/r/AskReddit/comments/5wkkwg/men_of_reddit_whats_the_biggest_im_a_princess_red/'
data = parse_reddit(reddit, the_url)
## Check that it works for 1 comment
text = data[0]
print(text)
print(flesch_reading_ease(text, show_details = True))
fre = flesch_reading_ease(text)
readability_ease_interpretation(fre)
## To data frame
x = [flesch_reading_ease(text) for text in data]
df = pd.DataFrame({"Princess":x})
## Is there NA?
df[df.Princess.isnull()]
data[301]
readability_ease_interpretation(df.Princess.mean())
urls = {
"elif" : "https://www.reddit.com/r/explainlikeimfive/comments/5xle3c/eli5_what_would_happen_if_you_take_a_compass_into/",
"showerthoughts" : "https://www.reddit.com/r/Showerthoughts/comments/5xippf/college_would_be_a_lot_more_affordable_if_they/",
"gaming" : "https://www.reddit.com/r/gaming/comments/5xhpqs/when_the_game_knows_shit_is_about_to_go_down/",
"askreddit" : "https://www.reddit.com/r/AskReddit/comments/5xhvlq/whats_your_favorite_insult_that_does_not_contain/",
"tifu" : "https://www.reddit.com/r/tifu/comments/5xhfwh/tifu_by_being_courteous/"
}
random.seed(10)
df = {}
for k in urls.keys():
data = parse_reddit(reddit, urls[k])
fres = [flesch_reading_ease(text) for text in data]
fres = [fre for fre in fres if fre != None]
df[k] = random.sample(k=75, population=fres)
df = pd.DataFrame(df)
df.plot.kde(xlim=[-6,150])
df.plot.box(vert=False, xlim=[0, 156])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We make a few modifications
Step2: What about Reddit?
Step3: Same thread, different question
Step4: NA are mostly because words are not real
Step5: More threads
|
6,832
|
<ASSISTANT_TASK:>
Python Code:
#Program that solves exercise 1b.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def gradient_descent(xk,dx_f,gamma):
return xk-gamma*dx_f
def quadratic(a,b,c,x):
return a*x**2+b*x+c
def dx_quadratic(a,b,x):
return 2*a*x+b
#One variable examples
a,b,c = 1,0,1
x = np.linspace(-5,5,101)
quad = quadratic(a,b,c,x)
dx_quad = dx_quadratic(a,b,x)
xk = -3
xk_vec = [xk]
fxk_vec = [quadratic(a,b,c,xk)]
gamma = 0.3
iters = 0
max_iters = 1000
converged = False
while(abs(dx_quadratic(a,b,xk)) > 1e-6 and iters < max_iters):
xk = gradient_descent(xk,dx_quadratic(a,b,xk),gamma)
xk_vec.append(xk)
fxk_vec.append(quadratic(a,b,c,xk))
iters += 1
if(iters < max_iters):
converged = True
print ("Converged: %s" % converged)
print ("Number of iterations for convergence: %d" % iters)
plt.figure(1)
plt.plot(x,quad)
plt.plot(xk_vec,fxk_vec,'o')
plt.legend(["f(x)","GD-iterates"])
plt.show()
def quartic(x):
return (x+4)*(x+1)*(x-1)*(x-3)/14.0 + 0.5
def dx_quartic(x):
return (1.0/14.0)*(4*x**3 + 3*x**2 - 26*x - 1)
x = np.linspace(-5,4,101)
quart = quartic(x)
dx_quart = dx_quartic(x)
xk = -0.02
xk_vec = [xk]
fxk_vec = [quartic(xk)]
gamma = 0.1
iters = 0
max_iters = 200
converged = False
while(abs(dx_quartic(xk)) > 1e-6 and iters < max_iters):
xk = gradient_descent(xk,dx_quartic(xk),gamma)
xk_vec.append(xk)
fxk_vec.append(quartic(xk))
iters += 1
if(iters < max_iters):
converged = True
print ("Converged: %s" % converged)
print ("Number of iterations for convergence: %d" % iters)
plt.figure(1)
plt.plot(x,quart)
plt.plot(xk_vec,fxk_vec,'o')
plt.legend(["f(x)","GD-iterates"])
plt.show()
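# A quick extra experiment (illustrative): the quartic has two local minima, so plain
# gradient descent ends up in different ones depending on the initial guess.
for x_init in [-4.0, -0.02, 0.5, 3.5]:
    xk = x_init
    for _ in range(500):
        xk = gradient_descent(xk, dx_quartic(xk), 0.1)
    print("x0 = %5.2f  ->  x* = %6.3f,  f(x*) = %6.3f" % (x_init, xk, quartic(xk)))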
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
#Two variable example
def Z(x,y):
return x**2+10*y**2-1
def grad_Z(x,y):
gZ = np.zeros(2)
gZ[0] = 2*x
gZ[1] = 20*y
return gZ
X = np.arange(-4,4,0.1)
Y = np.arange(-5,5,0.1)
X_, Y_ = np.meshgrid(X, Y)
Z_ = Z(X_,Y_)
fig = plt.figure(2)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X_, Y_, Z_, cmap=cm.coolwarm,linewidth=0, antialiased=False)
plt.show()
plt.figure(3)
plt.contour(X_,Y_,Z_,corner_mask=0)
plt.show()
xk = np.zeros(2)
xk[0] = 1.5
xk[1] = 2.3
gamma = 0.05
iters = 0
max_iters = 200
converged = False
while(abs(np.linalg.norm(grad_Z(xk[0],xk[1]))) > 1e-6 and iters < max_iters):
xk = xk - gamma*grad_Z(xk[0],xk[1])
iters += 1
print ("Number of iterations for convergence: %d" % iters)
print(xk)
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
#One variable example
N = 100
x0 = np.ones(N)
x1 = 2*np.random.rand(N)
y = 4 + 3*x1 + np.random.randn(N)
#Compute theta and predicted y using normal equations
X = np.c_[x0,x1]
Xt_X_inv = np.linalg.inv(np.dot(X.transpose(),X))
Xt_y = np.dot(X.transpose(),y)
theta_normeqs = np.dot(Xt_X_inv,Xt_y)
#Compute theta using gradient descent
eta = 0.1
max_iters = 100
theta = np.random.randn(2)
diff = 100
iters = 0
while(diff > 1e-10):
gradient = 2.0/float(N) * np.dot(X.transpose(), np.dot(X,theta) - y)
theta = theta-eta*gradient
diff = np.linalg.norm(gradient)
iters += 1
#Output number of iterations before convergence and compare theta computed with GD with theta computed
#using the Normal equations
print("Number of iterations before convergence: %d" % iters)
print(abs(theta_normeqs-theta))
print(theta_normeqs)
print(theta)
#Plot true y and y_predicted
plt.figure(4)
y_pred = theta[0] + theta[1]*x1
plt.plot(x1,y,'ro')
plt.plot(x1,y_pred,'-b')
plt.show()
#Two variable example
N = 100
x0 = np.ones(N)
x1 = 2*np.random.rand(N)
x2 = 2*np.random.rand(N)
noise_scale = 2
g_noise = noise_scale*np.random.randn(N)
y = 4+3*x1+2*x2+g_noise
#Compute theta using normal equations
X = np.c_[x0,x1,x2]
Xt_X_inv = np.linalg.inv(np.dot(X.transpose(),X))
Xt_y = np.dot(X.transpose(),y)
theta_normeqs = np.dot(Xt_X_inv,Xt_y)
#Compute theta using gradient descent
eta = 0.1
max_iters = 100
theta = np.random.randn(3)
diff = 100
iters = 0
while(diff > 1e-10):
gradient = 2.0/float(N) * np.dot(X.transpose(), np.dot(X,theta) - y)
theta = theta-eta*gradient
diff = np.linalg.norm(gradient)
iters += 1
#Output number of iterations before convergence and compare theta computed with GD with theta computed
#using the Normal equations
print("Number of iterations before convergence: %d" % iters)
print(abs(theta_normeqs-theta))
print(theta_normeqs)
print(theta)
#Plot true y and y_predicted
y_pred = theta[0] + theta[1]*x1+theta[2]*x2
fig = plt.figure(5)
ax = fig.gca(projection='3d')
scatter1 = ax.scatter(x1, x2, y,marker='^',c='r')
scatter2 = ax.scatter(x1, x2, y_pred,marker='o',c='b')
plt.show()
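# Cross-check sketch (assumes scikit-learn is available; not part of the original exercise):
# an off-the-shelf least-squares fit should recover essentially the same coefficients as the
# normal equations and gradient descent above.
from sklearn.linear_model import LinearRegression
reg = LinearRegression().fit(np.c_[x1, x2], y)
print(reg.intercept_, reg.coef_)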
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: c) Quadratic functions as the one in exercise b) are particularly forigiving to work with since they only have one minimum/maximum, which in turn is global. A third order polynomial can have a maximum and a minimum or a saddle point. A fourth order polynomial may have two local minima. The point of the following exercise is to investigate the how GD depends on the initial guess, $x_0$.
Step2: d) In this exercise we will look at function of two variables.
Step3: Exercise 2 (Linear regression)
|
6,833
|
<ASSISTANT_TASK:>
Python Code:
from sympy import var, sin, cos, Matrix, Integer, eye, Function, Rational, exp, Symbol, I, solve, pi, trigsimp, dsolve, sinh, cosh, simplify
from sympy.physics.mechanics import mechanics_printing
mechanics_printing()
var("m1 m2 m3 J1 J2 J3 l1 l2 L1 L2 L0 t g")
q1 = Function("q1")(t)
q2 = Function("q2")(t)
q3 = Function("q3")(t)
x1 = l1*cos(q1)
y1 = l1*sin(q1)
z1 = L0
v1 = x1.diff("t")**2 + y1.diff("t")**2 + z1.diff("t")**2
v1.trigsimp()
x2 = L1*cos(q1) + l2*cos(q1 + q2)
y2 = L1*sin(q1) + l2*sin(q1 + q2)
z2 = L0
v2 = x2.diff("t")**2 + y2.diff("t")**2 + z2.diff("t")**2
v2.trigsimp()
x3 = L1*cos(q1) + L2*cos(q1 + q2)
y3 = L1*sin(q1) + L2*sin(q1 + q2)
z3 = L0 - q3
v3 = x3.diff("t")**2 + y3.diff("t")**2 + z3.diff("t")**2
v3.trigsimp()
ω1 = q1.diff("t")
ω2 = q2.diff("t")
ω3 = 0
K1 = Rational(1, 2)*m1*v1 + Rational(1, 2)*J1*ω1**2
K1
K2 = Rational(1, 2)*m1*v2 + Rational(1, 2)*J2*ω2**2
K2
K3 = Rational(1, 2)*m1*v3 + Rational(1, 2)*J3*ω3**2
K3
U1 = m1*g*z1
U1
U2 = m2*g*z2
U2
U3 = m3*g*z3
U3
K = K1 + K2 + K3
K
U = U1 + U2 + U3
U
L = (K - U).expand().simplify()
L
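# The next three lines apply the Euler-Lagrange equation to each generalized coordinate q_i:
#   tau_i = d/dt( dL/d(q_i_dot) ) - dL/dq_i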
τ1 = (L.diff(q1.diff(t)).diff(t) - L.diff(q1)).simplify().expand().collect(q1.diff(t).diff(t)).collect(q2.diff(t).diff(t))
τ2 = (L.diff(q2.diff(t)).diff(t) - L.diff(q2)).simplify().expand().collect(q1.diff(t).diff(t)).collect(q2.diff(t).diff(t))
τ3 = (L.diff(q3.diff(t)).diff(t) - L.diff(q3)).simplify().expand().collect(q1.diff(t).diff(t)).collect(q2.diff(t).diff(t))
τ1
τ2
τ3
from scipy.integrate import odeint
from numpy import linspace
def scara(estado, tiempo):
# Import the functions needed inside the integrator callback
from numpy import sin, cos, matrix
# Unpack the state variables and the time
q1, q2, q3, q̇1, q̇2, q̇3 = estado
t = tiempo
# Declare the physical constants of the system
m1, m2, m3 = 1, 1, 1
J1, J2, J3 = 1, 1, 1
l1, l2 = 0.5, 0.5
L1, L2 = 1, 1
L = 1
g = 9.81
# Declare the controller gains
kp1, kp2, kp3 = -30, -60, -60
kv1, kv2, kv3 = -20, -20, -18
# Zero control signals (uncomment to simulate the open-loop response)
#tau1, tau2, tau3 = 0, 0, 0
# Desired joint positions
qd1, qd2, qd3 = 1, 1, 1
# PD control signals (with gravity compensation on the prismatic joint)
tau1 = kp1*(q1 - qd1) + kv1*q̇1
tau2 = kp2*(q2 - qd2) + kv2*q̇2
tau3 = kp3*(q3 - qd3) + kv3*q̇3 + m3*g
# Precompute some common terms
λ1 = m1*L1*(l2 + L2)
λ2 = m1*(l2**2 + L2**2)
λ3 = m1*(l1**2 + L1**2)
# Build the mass and Coriolis matrices,
# and the gravity, control, position and velocity vectors
M = matrix([[J1 + 2*λ1*cos(q2) + m1*L1**2 + λ2 + λ3, λ1*cos(q2) + λ2, 0],
[λ1*cos(q2) + λ2, J2 + λ2, 0],
[0, 0, m1]])
C = matrix([[-2*q̇1, -q̇2, 0], [q̇1, 0, 0], [0, 0, 0]])
G = matrix([[0], [0], [-m3*g]])
Tau = matrix([[tau1], [tau2], [tau3]])
q = matrix([[q1], [q2], [q3]])
q̇ = matrix([[q̇1], [q̇2], [q̇3]])
# Compute the time derivative of the state
qp1 = q̇1
qp2 = q̇2
qp3 = q̇3
qpp = M.I*(Tau - C*q̇ - G)
qpp1, qpp2, qpp3 = qpp.tolist()
return [qp1, qp2, qp3, qpp1[0], qpp2[0], qpp3[0]]
t = linspace(0, 10, 1000)
estados_simulados = odeint(func = scara, y0 = [0, 0, 0, 0, 0, 0], t = t)
q1, q2, q3, q̇1, q̇2, q̇3 = list(zip(*estados_simulados.tolist()))
%matplotlib notebook
from matplotlib.pyplot import plot, style, figure
from mpl_toolkits.mplot3d import Axes3D
style.use("ggplot")
fig1 = figure(figsize=(12, 8))
ax1 = fig1.gca()
p1, = ax1.plot(t, q1)
p2, = ax1.plot(t, q2)
p3, = ax1.plot(t, q3)
ax1.legend([p1, p2, p3],[r"$q_1$", r"$q_2$", r"$q_3$"])
ax1.set_ylim(-0.1, 1.2)
ax1.set_xlim(-0.1, 10);
def tras_x(x):
from numpy import matrix
A = matrix([[1, 0, 0, x],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
return A
def tras_z(z):
from numpy import matrix
A = matrix([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, z],
[0, 0, 0, 1]])
return A
def rot_z(θ):
from numpy import matrix, sin, cos
A = matrix([[cos(θ), -sin(θ), 0, 0],
[sin(θ), cos(θ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
return A
def cinematica_scara(q1, q2, q3, l1, l2, L):
from numpy import matrix
p0 = matrix([[0], [0], [0], [1]])
p1 = tras_z(L)*p0
p2 = rot_z(q1)*tras_x(l1)*p1
p3 = rot_z(q2)*tras_x(l2)*p2
p4 = tras_z(-q3)*p3
return [[p0.tolist()[0][0], p1.tolist()[0][0], p2.tolist()[0][0], p3.tolist()[0][0], p4.tolist()[0][0]],
[p0.tolist()[1][0], p1.tolist()[1][0], p2.tolist()[1][0], p3.tolist()[1][0], p4.tolist()[1][0]],
[p0.tolist()[2][0], p1.tolist()[2][0], p2.tolist()[2][0], p3.tolist()[2][0], p4.tolist()[2][0]]]
from numpy import pi
τ = 2*pi
xs, ys, zs = cinematica_scara(τ/12, τ/9, 0.5, 1, 1, 1)
fig2 = figure(figsize=(8, 8))
ax2 = fig2.gca(projection='3d')
ax2.plot(xs, ys, zs, "-o")
ax2.set_xlim(-2, 2)
ax2.set_ylim(-2, 2)
ax2.set_zlim(0, 1.5);
%matplotlib inline
def grafica_scara(q1, q2, q3, l1, l2, L):
xs, ys , zs = cinematica_scara(q1, q2, q3, l1, l2, L)
fig = figure(figsize=(8, 8))
ax = fig.gca(projection='3d')
ax.plot(xs, ys, zs, "-o")
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_zlim(0, 1.5);
grafica_scara(τ/12, τ/9, 0.2, 1, 1, 1)
# Import IPython widgets to interact with the function
from IPython.html.widgets import interact, fixed
# Call the interactive function
interact(grafica_scara, q1=(0, τ), q2=(0, τ), q3=(0, 1.0), l1=fixed(1), l2=fixed(1), L=fixed(1))
from matplotlib import animation
from numpy import arange
# Set the figure size
fig = figure(figsize=(8, 8))
# Define a single subplot in the figure and set the limits of the x and y axes
axi = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2), projection='3d')
# A line plot is used to draw the robot linkage
robot, = axi.plot([], [], [], "-o", lw=2)
def init():
    # This function runs only once and initialises the drawing
    robot.set_data([], [])
    return robot,
def animate(i):
    # This function runs for every frame of the GIF
    # Get the robot coordinates and feed them into its line plot
    xs, ys, zs = cinematica_scara(q1[i], q2[i], q3[i], 1, 1, 1)
    robot.set_data(xs, ys)
    robot.set_3d_properties(zs)
    return robot,
# Build the animation, giving it the figure defined at the beginning, the function to run
# for every frame, the number of frames to produce, the period of each frame and the
# initialisation function
ani = animation.FuncAnimation(fig, animate, arange(1, len(q1)), interval=25,
blit=True, init_func=init)
# Save the GIF to the given file
ani.save('./imagenes/simulacion-scara.gif', writer='imagemagick');
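# Editor's sketch (not in the original notebook): the exported GIF can be embedded back
# into the notebook for a quick look.
from IPython.display import Image
Image(url="./imagenes/simulacion-scara.gif")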
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And we declare all the constants involved in this symbolic computation
Step2: As well as some of the variables of our problem
Step3: We start with the computation of the position of the centres of mass of the links, as well as its derivative and the square of the linear velocity of each link
Step4: We declare $\omega_i$ as the angular velocity of each link
Step5: And we proceed to compute the kinetic energy of each link
Step6: We also compute the potential energy of each link
Step7: With this we can compute both the kinetic and the potential energy of the system
Step8: and the Lagrangian becomes
Step9: So now we only have to apply the Euler-Lagrange equation to obtain the equations of motion of the system
Step10: Once we have the equations of motion, we simulate the behaviour of the system with the odeint function and obtain a plot of the system trajectory
Step11: To use the odeint function we must create a function that describes the dynamics of the system
Step12: Then declare an array with all the times to simulate, call the odeint function, and we are done!
Step13: We unpack the elements that odeint returns
Step14: We import the plotting library
Step15: We plot the system trajectories, $q_1$, $q_2$ and $q_3$
Step16: We work out the kinematics of the manipulator so that we can plot it in 3D
Step17: We switch the matplotlib environment to plot interactively, and declare a function that takes the kinematics of the robot and plots the points
Step18: We import the library to interact with the data and pass it the plotting function
Step19: To build an animation from the simulation data we first import the required library and create the same plot inside the animation environment
|
6,834
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from mne import io, pick_types, read_events, Epochs
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.decoding import EpochsVectorizer
from mne.viz import tight_layout
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, method='iir')
events = read_events(event_fname)
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
add_eeg_ref=False, verbose=False)
# Create classification pipeline
clf = make_pipeline(Xdawn(n_components=3),
EpochsVectorizer(),
MinMaxScaler(),
LogisticRegression(penalty='l1'))
# Get the labels
labels = epochs.events[:, -1]
# Cross validator
cv = StratifiedKFold(y=labels, n_folds=10, shuffle=True, random_state=42)
# Do cross-validation
preds = np.empty(len(labels))
for train, test in cv:
clf.fit(epochs[train], labels[train])
preds[test] = clf.predict(epochs[test])
# Classification report
target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r']
report = classification_report(labels, preds, target_names=target_names)
print(report)
# Normalized confusion matrix
cm = confusion_matrix(labels, preds)
cm_normalized = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis]
# Plot confusion matrix
plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Normalized Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
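# Editor's sketch (not part of the original example): a single overall accuracy figure to
# complement the per-class report above.
print("Overall accuracy: %0.3f" % np.mean(preds == labels))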
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters and read data
|
6,835
|
<ASSISTANT_TASK:>
Python Code:
# Import the required packages
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import scipy
# Package imports
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.linear_model
def layer_sizes(X, Y, hidden_neurons):
Arguments:
X -- input dataset of shape (input size, number of examples)
Y -- labels of shape (output size, number of examples)
Returns:
n_x -- the size of the input layer
n_h -- the size of the hidden layer
n_y -- the size of the output layer
n_x = X.shape[0] # size of input layer
n_h = hidden_neurons
n_y = Y.shape[0] # size of output layer
return (n_x, n_h, n_y)
# Solved Exercise: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.
W1 = np.random.randn(n_h,n_x) * 0.01
b1 = np.zeros(shape=(n_h,1))
W2 = np.random.randn(n_y,n_h) * 0.01
b2 = np.zeros(shape=(n_y,1))
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
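# Editor's sketch (not part of the original exercise): a quick shape check of the
# initialisation on a toy layout with 2 inputs, 4 hidden units and 1 output.
_toy_params = initialize_parameters(2, 4, 1)
for _name, _value in _toy_params.items():
    print(_name, _value.shape)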
def forward_propagation(X, parameters):
Argument:
X -- input data of size (n_x, m)
parameters -- python dictionary containing your parameters (output of initialization function)
Returns:
A2 -- The sigmoid output of the second activation
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
# Retrieve each parameter from the dictionary "parameters"
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Implement Forward Propagation to calculate A2 (probabilities)
Z1 = np.dot(W1,X)+b1
A1 = np.tanh(Z1)
Z2 = np.dot(W2,A1)+b2
A2 = sigmoid(Z2)
assert(A2.shape == (1, X.shape[1]))
cache = {"Z1": Z1,
"A1": A1,
"Z2": Z2,
"A2": A2}
return A2, cache
def sigmoid(x):
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
s = 1/(1+np.exp(-x))
return s
def compute_cost(A2, Y, parameters):
Computes the cross-entropy cost given in equation (13)
Arguments:
A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2 and b2
Returns:
cost -- cross-entropy cost given equation (13)
m = Y.shape[1] # number of example
# Compute the cross-entropy cost
logprobs = np.multiply(Y,np.log(A2)) + np.multiply(1-Y,np.log(1-A2))
cost = -1/m * np.sum(logprobs)
cost = np.squeeze(cost) # makes sure cost is the dimension we expect.
# E.g., turns [[17]] into 17
assert(isinstance(cost, float))
return cost
def backward_propagation(parameters, cache, X, Y):
Implement the backward propagation using the instructions above.
Arguments:
parameters -- python dictionary containing our parameters
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
X -- input data of shape (2, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
Returns:
grads -- python dictionary containing your gradients with respect to different parameters
m = X.shape[1]
# First, retrieve W1 and W2 from the dictionary "parameters".
W1 = parameters["W1"]
W2 = parameters["W2"]
# Retrieve also A1 and A2 from dictionary "cache".
A1 = cache["A1"]
A2 = cache["A2"]
# Backward propagation: calculate dW1, db1, dW2, db2.
dZ2 = A2 - Y
dW2 = 1/m * np.dot(dZ2,A1.T)
db2 = 1/m*np.sum(dZ2,axis=1,keepdims=True)
dZ1 = np.dot(W2.T,dZ2) * (1 - np.power(A1,2))
dW1 = 1/m* np.dot(dZ1,X.T)
db1 = 1/m*np.sum(dZ1,axis=1,keepdims=True)
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
return grads
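# Editor's sketch (not part of the original exercise): a finite-difference check of dW2 on
# tiny random data. If the analytic gradients above are correct, the difference should be
# very small (well below 1e-6 with this step size).
def _finite_difference_check_dW2(eps=1e-6, seed=1):
    np.random.seed(seed)
    X_check = np.random.randn(2, 5)
    Y_check = (np.random.rand(1, 5) > 0.5).astype(float)
    params = initialize_parameters(2, 3, 1)
    _, cache_check = forward_propagation(X_check, params)
    dW2 = backward_propagation(params, cache_check, X_check, Y_check)["dW2"]
    numeric = np.zeros_like(dW2)
    for i in range(dW2.shape[0]):
        for j in range(dW2.shape[1]):
            plus = {k: v.copy() for k, v in params.items()}
            minus = {k: v.copy() for k, v in params.items()}
            plus["W2"][i, j] += eps
            minus["W2"][i, j] -= eps
            cost_plus = compute_cost(forward_propagation(X_check, plus)[0], Y_check, plus)
            cost_minus = compute_cost(forward_propagation(X_check, minus)[0], Y_check, minus)
            numeric[i, j] = (cost_plus - cost_minus) / (2 * eps)
    print("max |analytic - numeric| for dW2:", np.abs(dW2 - numeric).max())
_finite_difference_check_dW2()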
def update_parameters(parameters, grads, learning_rate = 1.2):
Updates parameters using the gradient descent update rule given above
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients
Returns:
parameters -- python dictionary containing your updated parameters
# Retrieve each parameter from the dictionary "parameters"
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Retrieve each gradient from the dictionary "grads"
dW1 = grads["dW1"]
db1 = grads["db1"]
dW2 = grads["dW2"]
db2 = grads["db2"]
# Update rule for each parameter
W1 = W1 - learning_rate * dW1
b1 = b1 - learning_rate * db1
W2 = W2 - learning_rate * dW2
b2 = b2 - learning_rate * db2
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
Arguments:
X -- dataset of shape (2, number of examples)
Y -- labels of shape (1, number of examples)
n_h -- size of the hidden layer
num_iterations -- Number of iterations in gradient descent loop
print_cost -- if True, print the cost every 1000 iterations
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
np.random.seed(3)
n_x = layer_sizes(X, Y,n_h)[0]
n_y = layer_sizes(X, Y,n_h)[2]
# Initialize parameters, then retrieve W1, b1, W2, b2. Inputs: "n_x, n_h, n_y". Outputs = "W1, b1, W2, b2, parameters".
parameters = initialize_parameters(n_x,n_h,n_y)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache".
A2, cache = forward_propagation(X, parameters)
# Cost function. Inputs: "A2, Y, parameters". Outputs: "cost".
cost = compute_cost(A2,Y,parameters)
# Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads".
grads = backward_propagation(parameters,cache,X,Y)
# Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters".
parameters = update_parameters(parameters,grads)
# Print the cost every 1000 iterations
if print_cost and i % 1000 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
return parameters
def predict(parameters, X):
Using the learned parameters, predicts a class for each example in X
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (n_x, m)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
A2, cache = forward_propagation(X,parameters)
predictions = A2 > 0.5
return predictions
from sklearn.datasets.samples_generator import make_circles
x_train, y_train = make_circles(n_samples=1000, noise= 0.05, random_state=3)
plt.figure(figsize=(15, 10))
plt.scatter(x_train[:, 0], x_train[:,1], c=y_train, s=40, cmap=plt.cm.Spectral);
def plot_decision_boundary(model, X, y):
plt.figure(figsize=(15,10))
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=y.ravel(), s=80, cmap=plt.cm.Spectral)
# Build a model with a n_h-dimensional hidden layer
parameters = nn_model(x_train.T, np.array([y_train.T]), n_h = 5, num_iterations = 10000, print_cost=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), x_train.T, np.array([y_train.T]))
plt.title("Decision Boundary for hidden layer size " + str(4))
# Print accuracy
predictions = predict(parameters, x_train.T)
print ('Accuracy: %d' % float((np.dot( np.array([y_train.T]),predictions.T) + np.dot(1- np.array([y_train.T]),1-predictions.T))/float( np.array([y_train.T]).size)*100) + '%')
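# Editor's sketch (not part of the original code): re-running the same pipeline for a few
# hidden-layer sizes shows how model capacity affects accuracy on the circles dataset.
for _n_h in [1, 2, 5, 20]:
    _params_h = nn_model(x_train.T, np.array([y_train.T]), n_h=_n_h, num_iterations=5000)
    _pred_h = predict(_params_h, x_train.T)
    _acc_h = 100 * np.mean(_pred_h == np.array([y_train.T]))
    print("n_h = %2d -> accuracy %.1f%%" % (_n_h, _acc_h))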
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Let's implement our first MLP
Step4: Initialize the model's parameters
Step7: The Loop
Step9: Now that you have computed $A^{[2]}$ (in the Python variable "A2"), which contains $a^{2}$ for every example, you can compute the cost function as follows
Step11: Using the cache computed during forward propagation, you can now implement backward propagation.
Step13: Update Parameters
Step15: Integrate parts [Network structure, Model Parameters, the loop] in nn_model()
Step17: Predictions
Step18: Test our MLP building blocks in real data
Step19: Expected Output
|
6,836
|
<ASSISTANT_TASK:>
Python Code:
problem1 = {
"left_lines": [
r"HE|LL|O+", # HE|LL|O+ line 1
r"[PLEASE]+", # [PLEASE]+ line 2
],
"right_lines": None,
"top_columns": [
r"[^SPEAK]+", # [^SPEAK]+ column 1
r"EP|IP|EF", # EP|IP|EF column 2
],
"bottom_columns": None,
}
alphabet1 = {
'H', 'E', 'L', 'O',
'P', 'L', 'E', 'A', 'S', 'E',
'S', 'P', 'E', 'A', 'K',
'E', 'P', 'I', 'P', 'I', 'F',
}
print(f"alphabet1 = \n{sorted(alphabet1)}")
problem2 = {
"left_lines": [
r"(N3|TRA|N7)+", # left line 1
r"[1LOVE2?4]+.", # left line 2
r"(A|D)M[5-8$L]+", # left line 3
r"[^\s0ILAD]+", # left line 4
r"[B-E]+(.)\1.", # left line 5
],
"right_lines": [
r"[^OLD\s]+", # right line 1
r"(\d+)[LA\s$?]+", # right line 2
r"(\-P|5\$|AM|Z|L)+", # right line 3
r"(\-D|\-WE)+[^L4-9N$?]+", # right line 4
r"[FED$?]+", # right line 5
],
"top_columns": [
r"[2TAIL\-D]+", # top column 1
r"(WE|R4|RY|M)+", # top column 2
r"[FEAL3-5S]+", # top column 3
r"[^FA\sT1-2]+F", # top column 4
r"[LO\s\?5-8]+", # top column 5
],
"bottom_columns": [
r"[^ILYO]+", # top column 1
r".+[MURDEW]+", # top column 2
r"[1ALF5$E\s]+", # top column 3
r"[\dFAN$?]+", # top column 4
r".+\s.+\?", # top column 5
],
}
import string
alphabet2 = set(string.digits) \
| set(string.ascii_uppercase) \
| { ':', '?', '.', '$', '-' }
print(f"alphabet2 = \n{sorted(alphabet2)}")
problem3 = {
"left_lines": [
r"[ONE]*[SKA]", # left line 1
r".*(RE|ER)", # left line 2
r"A+[TUB]*", # left line 3
],
"right_lines": [
r".*(O|S)*", # right line 1
r"[^GOA]*", # right line 2
r"[STUPA]+", # right line 3
],
"top_columns": [
r".*[GAF]*", # top column 1
r"(P|ET|O|TEA)*", # top column 2
r"[RUSH]+", # top column 3
],
"bottom_columns": [
r"(NF|FA|A|FN)+", # top column 1
r".*(A|E|I).*", # top column 2
r"[SUPER]*", # top column 3
],
}
alphabet3 = {
'O', 'N', 'E', 'S', 'K', 'A',
'R', 'E', 'E', 'R',
'A', 'T', 'U', 'B',
'O', 'S',
'G', 'O', 'A',
'S', 'T', 'U', 'P', 'A',
'G', 'A', 'F',
'P', 'E', 'T', 'O', 'T', 'E', 'A',
'R', 'U', 'S', 'H',
'N', 'F', 'F', 'A', 'A', 'F', 'N',
'A', 'E', 'I',
'S', 'U', 'P', 'E', 'R',
}
print(f"alphabet3 = \n{sorted(alphabet3)}")
def dimension_problem(problem):
m = len(problem['left_lines'])
if problem['right_lines'] is not None:
assert m == len(problem['right_lines'])
n = len(problem['top_columns'])
if problem['bottom_columns'] is not None:
assert n == len(problem['bottom_columns'])
return (m, n)
problem1
dimension_problem(problem1)
___ = "_" # represents an empty answer, as _ is not in the alphabet
grid1_partial = [
[ 'H', ___ ],
[ ___, 'P' ],
]
grid1_solution = [
[ 'H', 'E' ],
[ 'L', 'P' ],
]
grid1_wrong1 = [
[ 'H', 'E' ],
[ 'L', 'F' ],
]
grid1_wrong2 = [
[ 'H', 'E' ],
[ 'E', 'P' ],
]
grid1_wrong3 = [
[ 'H', 'E' ],
[ 'O', 'F' ],
]
grid1_wrong4 = [
[ 'O', 'E' ],
[ 'O', 'F' ],
]
def nth_line(grid, line):
return "".join(grid[line])
def nth_column(grid, column):
return "".join(grid[line][column] for line in range(len(grid)))
[ nth_line(grid1_solution, line) for line in range(len(grid1_solution)) ]
[ nth_column(grid1_solution, column) for column in range(len(grid1_solution[0])) ]
___ = "_" # represents an empty answer, as _ is not in the alphabet
grid3_solution = [
[ 'N', 'O', 'S' ],
[ 'F', 'E', 'R' ],
[ 'A', 'T', 'U' ],
]
___ = "_" # represents an empty answer, as _ is not in the alphabet
grid2_partial = [
[ 'T', 'R', 'A', 'N', '7' ],
[ '2', '4', ___, ___, ' ' ],
[ 'A', ___, ___, ___, ___ ],
[ '-', ___, ___, ___, ___ ],
[ 'D', ___, ___, ___, '?' ],
]
def dimension_grid(grid):
m = len(grid)
n = len(grid[0])
assert all(n == len(grid[i]) for i in range(1, m))
return (m, n)
print(f"Grid grid1_partial has dimension: {dimension_grid(grid1_partial)}")
print(f"Grid grid1_solution has dimension: {dimension_grid(grid1_solution)}")
print(f"Grid grid2_partial has dimension: {dimension_grid(grid2_partial)}")
def check_dimensions(problem, grid):
return dimension_problem(problem) == dimension_grid(grid)
assert check_dimensions(problem1, grid1_partial)
assert check_dimensions(problem1, grid1_solution)
assert not check_dimensions(problem2, grid1_partial)
assert check_dimensions(problem2, grid2_partial)
assert not check_dimensions(problem1, grid2_partial)
def check_alphabet(alphabet, word, debug=True):
result = True
for i, letter in enumerate(word):
new_result = letter in alphabet
if debug and result and not new_result:
print(f"The word {repr(word)} is not in alphabet {repr(alphabet)}, as its #{i}th letter {letter} is not present.")
result = result and new_result
return result
assert check_alphabet(alphabet1, 'H' 'E') # concatenate the strings
assert check_alphabet(alphabet1, 'H' 'E')
assert check_alphabet(alphabet1, 'L' 'P')
assert check_alphabet(alphabet1, 'H' 'L')
assert check_alphabet(alphabet1, 'E' 'P')
assert check_alphabet(alphabet2, "TRAN7")
import re
memory_of_compiled_regexps = dict()
def match(regexp, word, debug=True):
global memory_of_compiled_regexps
if regexp not in memory_of_compiled_regexps:
prog = re.compile(regexp)
memory_of_compiled_regexps[regexp] = prog
print(f"For the first time seeing this regexp {repr(regexp)}, compiling it and storing in memory_of_compiled_regexps, now of size {len(memory_of_compiled_regexps)}.")
else:
prog = memory_of_compiled_regexps[regexp]
    # use the cached compiled pattern; calling re.fullmatch(regexp, word) here as well
    # would recompile the pattern and defeat the memoization
    result = prog.fullmatch(word)
entire_match = result is not None
# entire_match = result.group(0) == word
if debug:
if entire_match:
print(f"The word {repr(word)} is matched by {repr(regexp)}")
else:
print(f"The word {repr(word)} is NOT matched by {repr(regexp)}")
return entire_match
%%time
match(r"(N3|TRA|N7)+", "TRAN7")
%%time
match(r"(N3|TRA|N7)+", "TRAN8")
match(r"(N3|TRA|N7)+", "")
match(r"(N3|TRA|N7)+", "TRA")
%timeit match(r"(N3|TRA|N7)+", "TRA", debug=False)
%timeit re.fullmatch(r"(N3|TRA|N7)+", "TRA")
def check_line(problem, alphabet, word, position, debug=True, early=False):
if not check_alphabet(alphabet, word, debug=debug):
return False
m, n = dimension_problem(problem)
if len(word) != n:
if debug:
print(f"Word {repr(word)} does not have correct size n = {n} for lines")
return False
assert 0 <= position < m
constraints = []
if "left_lines" in problem and problem["left_lines"] is not None:
constraints += [ problem["left_lines"][position] ]
if "right_lines" in problem and problem["right_lines"] is not None:
constraints += [ problem["right_lines"][position] ]
# okay we have one or two constraint for this line,
assert len(constraints) in {1, 2}
# let's check them!
result = True
for cnb, constraint in enumerate(constraints):
if debug:
print(f"For line constraint #{cnb} {repr(constraint)}:")
new_result = match(constraint, word, debug=debug)
if early and not new_result: return False
result = result and new_result
return result
problem1, alphabet1, grid1_solution
n, m = dimension_problem(problem1)
for line in range(n):
word = nth_line(grid1_solution, line)
print(f"- For line number {line}, checking word {repr(word)}:")
result = check_line(problem1, alphabet1, word, line)
n, m = dimension_problem(problem1)
fake_words = ["OK", "HEY", "NOT", "HELL", "N", "", "HU", "OO", "EA"]
for word in fake_words:
print(f"# For word {repr(word)}:")
for line in range(n):
result = check_line(problem1, alphabet1, word, line)
print(f" => {result}")
n, m = dimension_problem(problem2)
for line in [0]:
word = nth_line(grid2_partial, line)
print(f"- For line number {line}, checking word {repr(word)}:")
result = check_line(problem2, alphabet2, word, line)
print(f" => {result}")
n, m = dimension_problem(problem2)
fake_words = [
"TRAN8", "N2TRA", # violate first constraint
"N3N3N7", "N3N3", "TRA9", # smaller or bigger dimension
"O L D", "TRA ", # violate second contraint
]
for word in fake_words:
for line in [0]:
print(f"- For line number {line}, checking word {repr(word)}:")
result = check_line(problem2, alphabet2, word, line)
print(f" => {result}")
def check_column(problem, alphabet, word, position, debug=True, early=False):
if not check_alphabet(alphabet, word, debug=debug):
return False
m, n = dimension_problem(problem)
if len(word) != m:
if debug:
print(f"Word {repr(word)} does not have correct size n = {n} for columns")
return False
assert 0 <= position < n
constraints = []
if "top_columns" in problem and problem["top_columns"] is not None:
constraints += [ problem["top_columns"][position] ]
if "bottom_columns" in problem and problem["bottom_columns"] is not None:
constraints += [ problem["bottom_columns"][position] ]
# okay we have one or two constraint for this column,
assert len(constraints) in {1, 2}
# let's check them!
result = True
for cnb, constraint in enumerate(constraints):
if debug:
print(f"For column constraint #{cnb} {repr(constraint)}:")
new_result = match(constraint, word, debug=debug)
if early and not new_result: return False
result = result and new_result
return result
problem1, alphabet1, grid1_solution
n, m = dimension_problem(problem1)
for column in range(m):
word = nth_column(grid1_solution, column)
print(f"- For column number {column}, checking word {repr(word)}:")
result = check_column(problem1, alphabet1, word, column)
n, m = dimension_problem(problem1)
fake_words = ["OK", "HEY", "NOT", "HELL", "N", "", "HU", "OO", "EA"]
for word in fake_words:
print(f"# For word {repr(word)}:")
for column in range(m):
result = check_column(problem1, alphabet1, word, column)
print(f" => {result}")
n, m = dimension_problem(problem2)
for column in [0]:
word = nth_column(grid2_partial, column)
print(f"- For column number {column}, checking word {repr(word)}:")
result = check_column(problem2, alphabet2, word, column)
print(f" => {result}")
n, m = dimension_problem(problem2)
fake_words = [
"TRAN8", "N2TRA", # violate first constraint
"N3N3N7", "N3N3", "TRA9", # smaller or bigger dimension
"O L D", "TRA ", # violate second contraint
]
for word in fake_words:
for line in [0]:
print(f"- For line number {line}, checking word {repr(word)}:")
result = check_column(problem2, alphabet2, word, line)
print(f" => {result}")
def check_grid(problem, alphabet, grid, debug=True, early=False):
m, n = dimension_problem(problem)
ok_lines = [False] * m
for line in range(m):
word = nth_line(grid, line)
ok_lines[line] = check_line(problem, alphabet, word, line, debug=debug, early=early)
ok_columns = [False] * n
for column in range(n):
word = nth_column(grid, column)
ok_columns[column] = check_column(problem, alphabet, word, column, debug=debug, early=early)
return all(ok_lines) and all(ok_columns)
check_grid(problem1, alphabet1, grid1_partial)
check_grid(problem1, alphabet1, grid1_solution)
check_grid(problem1, alphabet1, grid1_wrong1)
check_grid(problem1, alphabet1, grid1_wrong2)
check_grid(problem1, alphabet1, grid1_wrong3)
check_grid(problem1, alphabet1, grid1_wrong4)
check_grid(problem3, alphabet3, grid3_solution)
import itertools
def all_words_of_alphabet(alphabet, size):
yield from itertools.product(alphabet, repeat=size)
list(all_words_of_alphabet(['0', '1'], 3))
alphabet0 = ['0', '1']
len_alphabet = len(alphabet0)
for k in [2, 3, 4, 5]:
print(f"Generating {len_alphabet**k} words of size = {k} takes about")
%timeit list(all_words_of_alphabet(alphabet0, k))
%timeit list(all_words_of_alphabet(['0', '1', '2', '3'], 10))
len_alphabet = len(alphabet1)
for k in [2, 3, 4, 5]:
print(f"Generating {len_alphabet**k} words of size = {k} takes about")
%timeit list(all_words_of_alphabet(alphabet1, k))
len_alphabet = len(alphabet2)
for k in [2, 3, 4, 5]:
print(f"Generating {len_alphabet**k} words of size = {k} takes about")
%timeit list(all_words_of_alphabet(alphabet2, k))
def all_grids_of_alphabet(alphabet, lines, columns):
all_words = list(itertools.product(alphabet, repeat=columns))
all_words = [ "".join(words) for words in all_words ]
all_grids = itertools.product(all_words, repeat=lines)
for pre_tr_grid in all_grids:
tr_grid = [
[
pre_tr_grid[line][column]
for line in range(lines)
]
for column in range(columns)
]
yield tr_grid
for alphabet in ( ['0', '1'], ['T', 'A', 'C', 'G'] ):
for (n, m) in [ (1, 1), (2, 2), (1, 2), (2, 1), (3, 3), (3, 2), (2, 3) ]:
assert len(list(all_grids_of_alphabet(alphabet, n, m))) == len(alphabet)**(n*m)
print(list(all_grids_of_alphabet(alphabet0, n, m))[0])
print(list(all_grids_of_alphabet(alphabet0, n, m))[-1])
print(f"For the alphabet {alphabet0} of size = {len(alphabet0)} :")
for (n, m) in [ (1, 1), (2, 1), (1, 2), (2, 2) ]:
%time all_these_grids = list(all_grids_of_alphabet(alphabet0, n, m))
print(f"For (n, m) = {(n, m)} the number of grids is {len(all_these_grids)}")
print(f"For the alphabet {alphabet1} of size = {len(alphabet1)} :")
for (n, m) in [ (1, 1), (2, 1), (1, 2), (2, 2) ]:
%time all_these_grids = list(all_grids_of_alphabet(alphabet1, n, m))
print(f"For (n, m) = {(n, m)} the number of grids is {len(all_these_grids)}")
print(f"For the alphabet {alphabet2} of size = {len(alphabet2)} :")
for (n, m) in [ (1, 1), (2, 1), (1, 2), (2, 2) ]:
%time all_these_grids = list(all_grids_of_alphabet(alphabet2, n, m))
print(f"For (n, m) = {(n, m)} the number of grids is {len(all_these_grids)}")
41**(2*3)
n, m = 5, 5
41**(5*5)
import math
math.log10(41**(5*5))
s = 7
estimate_of_running_time = 7*s * len(alphabet1)**(5*5) / len(alphabet1)**(2*2)
estimate_of_running_time # in seconds
math.log10( estimate_of_running_time / (60*60*24*365) )
def naive_solve(problem, alphabet, debug=False, early=True):
n, m = dimension_problem(problem)
good_grids = []
for possible_grid in all_grids_of_alphabet(alphabet, n, m):
is_good_grid = check_grid(problem, alphabet, possible_grid, debug=debug, early=early)
if is_good_grid:
if early:
return [ possible_grid ]
good_grids.append(possible_grid)
return good_grids
%%time
good_grids1 = naive_solve(problem1, alphabet1, debug=False, early=True)
print(f"For problem 1\n{problem1}\nOn alphabet\n{alphabet1}\n==> We found one solution:\n{good_grids1}")
%%time
good_grids1 = naive_solve(problem1, alphabet1, debug=False, early=False)
print(f"For problem 1\n{problem1}\nOn alphabet\n{alphabet1}\n==> We found these solutions:\n{good_grids1}")
%%time
good_grids3 = naive_solve(problem3, alphabet3, debug=False, early=True)
print(f"For problem 3\n{problem3}\nOn alphabet\n{alphabet3}\n==> We found one solution:\n{good_grids3}")
%%time
good_grids2 = naive_solve(problem2, alphabet2, debug=False, early=True)
print(f"For problem 2\n{problem2}\nOn alphabet\n{alphabet2}\n==> We found one solution:\n{good_grids2}")
n, m = dimension_problem(problem1)
problem1
alphabet1
len(list(all_words_of_alphabet(alphabet1, n)))
["".join(word) for word in list(all_words_of_alphabet(alphabet1, n))][:10]
[
[ "".join(word)
for word in all_words_of_alphabet(alphabet1, n)
if check_line(problem1, alphabet1, "".join(word), line, debug=False, early=True)
]
for line in range(m)
]
[
[ "".join(word)
for word in all_words_of_alphabet(alphabet1, m)
if check_column(problem1, alphabet1, "".join(word), column, debug=False, early=True)
]
for column in range(n)
]
from tqdm.notebook import trange, tqdm
def smart_solve(problem, alphabet, debug=False, early=True):
n, m = dimension_problem(problem)
good_grids = []
possible_words_for_lines = [
[ "".join(word)
for word in all_words_of_alphabet(alphabet, n)
if check_line(problem, alphabet, "".join(word), line, debug=False, early=True)
# TODO don't compute this "".join(word) twice?
]
for line in range(m)
]
number_of_combinations = 1
for line in range(m):
number_of_combinations *= len(possible_words_for_lines[line])
print(f"- There are {len(possible_words_for_lines[line])} different words for line #{line}")
print(f"=> There are {number_of_combinations} combinations of words for lines #{0}..#{m-1}")
for possible_words in tqdm(
list(itertools.product(*possible_words_for_lines)),
desc="lines"
):
if debug: print(f" Trying possible_words from line constraints = {possible_words}")
column = 0
no_wrong_column = True
while no_wrong_column and column < n:
word_column = "".join(possible_words[line][column] for line in range(m))
if debug: print(f" For column #{column}, word = {word_column}, checking constraint...")
if not check_column(problem, alphabet, word_column, column, debug=False, early=True):
# this word is NOT valid for this column, so let's go to the next word
if debug: print(f" This word {word_column} is NOT valid for this column {column}, so let's go to the next word")
no_wrong_column = False
# break: this was failing... broke the outer for-loop and not the inner one
column += 1
if no_wrong_column:
print(f" These words seemed to satisfy the column constraints!\n{possible_words}")
# so all columns are valid! this choice of words is good!
possible_grid = [
list(word) for word in possible_words
]
print(f"Giving this grid:\n{possible_grid}")
# let's check it, just in case (this takes a short time, compared to the rest)
is_good_grid = check_grid(problem, alphabet, possible_grid, debug=debug, early=early)
if is_good_grid:
if early:
return [ possible_grid ]
good_grids.append(possible_grid)
# after the outer for loop on possible_words
return good_grids
grid1_solution
%%time
good_grids1 = smart_solve(problem1, alphabet1)
good_grids1
%%time
#assert False # uncomment when ready
good_grids3 = smart_solve(problem3, alphabet3)
good_grids3
%%time
#assert False # uncomment when ready
good_grids2 = smart_solve(problem2, alphabet2)
good_grids2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The keys "right_lines" and "bottom_columns" can be empty, as for easier problems there are no constraints on the right and bottom.
Step2: Difficult problem of size $(5,5)$ with 20 constraints
Step3: And its alphabet
Step4: An intermediate problem of size $(3,3)$ with 12 constraints
Step5: And its alphabet
Step6: A few useful functions
Step7: Now let's write a representation of a grid, a solution (or partial solution) of a problem
Step8: As well as a few complete grids which are NOT solutions
Step9: We also write these short functions to extract the $i$-th line or $j$-th column
Step10: A partial solution for the intermediate problem
Step11: And a partial solution for the harder problem
Step12: Let's extract the dimension of a grid, just to check it
Step13: Two more checks
Step14: And also check that a word matches a regexp
Step15: As the documentation explains it
Step16: Now we are ready to write our "smart" match function
Step17: Let's compare the time of the first match and next ones
Step18: Well of course it's not different for a tiny test like this.
Step19: That should be enough to start the first "easy" task.
Step20: We can see that our "memoization trick" indeed helped to speed-up the time required to check a regexp, by about a factor 2, even for very small tests like this.
Step21: Let's try it!
Step22: That was long, but it works fine!
Step23: For columns
Step24: Let's try it!
Step25: That was long, but it works fine!
Step26: Second easy task
Step27: Let's try it!
Step28: For a complete grid, let's check that our solution is valid
Step29: And let's also check that the few wrong solutions are indeed not valid
Step30: We can see that for each wrong grid, at least one of the constraints is violated!
Step31: For the hard problem
Step32: Just a quick check
Step33: The time and memory complexity of this function should be $\mathcal{O}(|\Sigma|^k)$ for words of size $k\in\mathbb{N}^*$.
Step34: We can quickly check that even for the larger alphabet of size ~40, it's quite quick for small words of length $\leq 5$
Step35: Whoa, it takes 12 seconds just to generate all the possible words for the largest problem (which is just of size $(5,5)$)...
Step36: How long does it take and how many grids for the easy problem?
Step37: That's still pretty small and fast!
Step38: Just for $(n, m) = (2, 2)$ it takes about 7 seconds...
Step39: For a grid of size $(5,5)$, the number of different possible grids is about $10^{40}$, that is CRAZY large, we have no hope of solving this problem with a brute force approach.
Step40: This rough estimate gives about $5 * 10^{22}$ seconds, about $10^{15}$ years, so about a million billion years!
Step41: First difficult task
Step42: Let's try it!
Step43: Then can we find more solutions?
Step44: No there is indeed a unique solution here for the first "easy" problem!
Step45: That was so long...
Step46: My first idea was to try to tackle each constraint independently, and generate the set of words that satisfy this constraint (by naively checking check(constraint, word) for each word in $\Sigma^n$ or $\Sigma^m$).
Step47: So let's write this algorithm.
Step48: And let's try it
Step49: So it worked!
Step50: 🚀 It was also BLAZING fast compared to the naive approach
|
6,837
|
<ASSISTANT_TASK:>
Python Code:
import healpix_util as hu
import astropy as ap
import numpy as np
from astropy.io import fits
from astropy.table import Table
import astropy.io.ascii as ascii
from astropy.constants import c
import matplotlib.pyplot as plt
import math
import scipy.special as sp
sdssq=ascii.read("/home/rohin/Desktop/healpix/SDSS_quasar_radecz.dat")
sdssq
sdssq.sort("z")
# NOTE: this cell refers to `pixdata` (per-object HEALPix indices) before it is defined,
# and Column comes from astropy.table rather than from the Table instance itself.
from astropy.table import Column
newcol = Column(name='pix', data=pixdata)
sdssq.add_column(newcol)
NSIDE=128
dt72hpix=hu.HealPix("ring",NSIDE)
j=0
for i in range(1,25):
pixdata = open("/home/rohin/Desktop/healpix/binned3/pixdata%d_%d.dat"%(NSIDE,i),'w')
pixdata.write("ra\t dec\t z\t pix \n")
#for j in range(len(sdssdr72)):
while sdssq[j]['z']<0.2*i:
pixdata.write("%f\t" %sdssq[j]['RA'])
pixdata.write("%f\t" %sdssq[j]['Dec'])
pixdata.write("%f\t" %sdssq[j]['z'])
pixdata.write("%d\n" %dt72hpix.eq2pix(sdssq[j]['RA'],sdssq[j]['Dec']))
#print dt72hpix.eq2pix(sdssdr72[j]['ra'],sdssdr72[j]['dec'])
j=j+1
pixdata.close()
pixdata = ascii.read("/home/rohin/Desktop/healpix/binned3/pixdata128_9.dat")
hpixdata=np.array(np.zeros(hu.nside2npix(NSIDE)))
for j in range(len(pixdata)):
hpixdata[pixdata[j]['pix']]+=1
hpixdata
hu.mollview(hpixdata,rot=180)
blah=hu.read_map("/home/rohin/Desktop/healpix/rast_sdss_dr72safe0_nside512.fits")
hu.mollview(blah,rot=180)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the data file
Step2: Create a healpix map with NSIDE=64 (no. of pixels = 49152 as $NPIX=12\times NSIDE^2$) because the no. of galaxies in the survey are less. For higher resolution (later for dr12) we will consider NSIDE=512 or even 1024. For now, we will create a 64 NSIDE map.
Step3: We have data of galaxies with redshifts between 0 and 0.5 ($0<z<0.5$). To look at a time slice/at a certain epoch we need to choose the list of galaxies within a redshift window. As, measurement of redshift has $\pm 0.05$ error. We can bin all the data into redshifts with range limited to 0.05 variation each. So, we have 10 databins with almost identical redshifts. We save each databin in a different file.
|
6,838
|
<ASSISTANT_TASK:>
Python Code:
def c2(masses, radii, e, a, rotation=None):
f = (1.0 - e**2)**-2
g = (8.0 + 12.0*e**2 + e**4)*f**(5.0/2.0) / 8.0
    if rotation is None:
omega_ratio_sq = 0.0
elif rotation == 'synchronized':
omega_ratio_sq = (1.0 + e)/(1.0 - e)**3
else:
omega_ratio_sq = 0.0
c2_0 = (omega_ratio_sq*(1.0 + masses[1]/masses[0])*f + 15.0*g*masses[1]/masses[0])*(radii[0]/a)**5
c2_1 = (omega_ratio_sq*(1.0 + masses[0]/masses[1])*f + 15.0*g*masses[0]/masses[1])*(radii[1]/a)**5
return c2_0, c2_1
# parameters for the orbit of Ba/Bb
e = 0.0015
a = 7.249776
masses = [0.984, 0.944]
# c2_B = c2(masses, radii, e, a)
import numpy as np
trk_Ba = np.genfromtxt('/Users/grefe950/evolve/dmestar/trk/gs98/p000/a0/amlt1884/m0980_GS98_p000_p0_y28_mlt1.884.trk')
trk_Bb = np.genfromtxt('/Users/grefe950/evolve/dmestar/trk/gs98/p000/a0/amlt1884/m0940_GS98_p000_p0_y28_mlt1.884.trk')
from scipy.interpolate import interp1d
log10_age = np.arange(6.0, 8.0, 0.01) # log10(age/yr)
ages = 10**log10_age
icurve = interp1d(trk_Ba[:,0], trk_Ba, kind='linear', axis=0)
new_trk_Ba = icurve(ages)
icurve = interp1d(trk_Bb[:,0], trk_Bb, kind='linear', axis=0)
new_trk_Bb = icurve(ages)
mean_trk_B = np.empty((len(ages), 3))
for i, age in enumerate(ages):
c2s = c2(masses, [10**new_trk_Ba[i, 4], 10**new_trk_Bb[i, 4]], e, a,
rotation='synchronized')
avg_k2 = (c2s[0]*new_trk_Ba[i, 10] + c2s[1]*new_trk_Bb[i, 10])/(sum(c2s))
mean_trk_B[i] = np.array([age, 10**new_trk_Ba[i, 4] + 10**new_trk_Bb[i, 4], avg_k2])
e2 = 0.2652
a2 = 66.2319
masses_2 = [1.44, 1.928]
trk_A = np.genfromtxt('/Users/grefe950/evolve/dmestar/trk/gs98/p000/a0/amlt1884/m1450_GS98_p000_p0_y28_mlt1.884.trk',
usecols=(0,1,2,3,4,5,6,7,8,9,10))
icurve = interp1d(trk_A[:,0], trk_A, kind='linear', axis=0)
new_trk_A = icurve(ages)
cl_apsidal_motion_rate = np.empty((len(ages), 2))
for i, age in enumerate(ages):
c2_AB = c2(masses_2, [10**new_trk_A[i, 4], a + 0.5*mean_trk_B[i, 1]], e2, a2)
cl_apsidal_motion_rate[i] = np.array([age, 360.0*(c2_AB[0]*new_trk_A[i, 10] + c2_AB[1]*mean_trk_B[i, 2])])
GR_apsidal_motion_rate = 5.45e-4*(sum(masses)/33.945)**(2./3.) / (1.0 - e2**2) # Giménez (1985)
GR_apsidal_motion_rate
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1, figsize=(8., 8.), sharex=True)
ax.grid(True)
ax.tick_params(which='major', axis='both', length=15., labelsize=18.)
ax.set_xlabel('Age (yr)', fontsize=20., family='serif')
ax.set_ylabel('Apsidal Motion Rate (deg / cycle)', fontsize=20., family='serif')
ax.plot([1.0e6, 1.0e8], [0.0215, 0.0215], '--', lw=1, c='#555555')
ax.plot([1.0e6, 1.0e8], [0.0255, 0.0255], '--', lw=1, c='#555555')
ax.semilogx(cl_apsidal_motion_rate[:, 0], cl_apsidal_motion_rate[:, 1], '-', lw=2, c='#b22222')
icurve = interp1d(cl_apsidal_motion_rate[:,1], cl_apsidal_motion_rate[:,0], kind='linear')
print(icurve(0.0235)/1.0e6, icurve(0.0255)/1.0e6, icurve(0.0215)/1.0e6)
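# Editor's sketch (not part of the original notebook): the total theoretical apsidal motion
# is the classical (tidal plus rotational) rate computed above plus the constant
# general-relativistic term.
total_rate = cl_apsidal_motion_rate[:, 1] + GR_apsidal_motion_rate
print("total apsidal motion rate at the last model age (deg / cycle):", total_rate[-1])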
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What complicates the issue is that the interior structure constants for the B components also vary as a function of age, so we need to compute a mean mass track using the $c_2$ coefficients and the individual $k_2$ values.
Step2: Create tracks with equally spaced time steps.
Step3: Now, compute the $c_2$ coefficients for each age.
Step4: With that, we have an estimate for the mean B component properties as a function of age. One complicating factor is the "radius" of the average B component. If we are modeling the potential created by the Ba/Bb components as that of a single star, we need to assume that the A component never enters into any region of the combined potential that is dominated by either component.
Step5: We are now in a position to compute the classical apsidal motion rate from the combined A/B tracks.
Step6: One can see from this that the general relativistic component is a very small contribution to the total apsidal motion of the system.
Step7: How sensitive is this to the properties of the A component, which are fairly uncertain?
|
6,839
|
<ASSISTANT_TASK:>
Python Code:
# Now convert all text to index using vocab!
corpus = np.array(list(map(vocab.get, data)))
print ("Type of 'corpus' is %s, shape is %s, and length is %d"
% (type(corpus), corpus.shape, len(corpus)))
check_len = 10
print ("\n'corpus' looks like %s" % (corpus[0:check_len]))
for i in range(check_len):
_wordidx = corpus[i]
print ("[%d/%d] chars[%02d] corresponds to '%s'"
% (i, check_len, _wordidx, chars[_wordidx]))
# Generate batch data
batch_size = 50
seq_length = 200
num_batches = int(corpus.size / (batch_size * seq_length))
# First, reduce the length of corpus to fit batch_size
corpus_reduced = corpus[:(num_batches*batch_size*seq_length)]
xdata = corpus_reduced
ydata = np.copy(xdata)
ydata[:-1] = xdata[1:]
ydata[-1] = xdata[0]
print ('xdata is ... %s and length is %d' % (xdata, xdata.size))
print ('ydata is ... %s and length is %d' % (ydata, xdata.size))
print ("")
# Second, make batch
xbatches = np.split(xdata.reshape(batch_size, -1), num_batches, 1)
ybatches = np.split(ydata.reshape(batch_size, -1), num_batches, 1)
print ("Type of 'xbatches' is %s and length is %d"
% (type(xbatches), len(xbatches)))
print ("Type of 'ybatches' is %s and length is %d"
% (type(ybatches), len(ybatches)))
print ("")
# How can we access to xbatches??
nbatch = 5
temp = xbatches[0:nbatch]
print ("Type of 'temp' is %s and length is %d"
% (type(temp), len(temp)))
for i in range(nbatch):
temp2 = temp[i]
print ("Type of 'temp[%d]' is %s and shape is %s" % (i, type(temp2), temp2.shape,))
# Important RNN parameters
vocab_size = len(vocab)
rnn_size = 128
num_layers = 2
grad_clip = 5.
# Construct RNN model
unitcell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
cell = tf.nn.rnn_cell.MultiRNNCell([unitcell] * num_layers)
input_data = tf.placeholder(tf.int32, [batch_size, seq_length])
targets = tf.placeholder(tf.int32, [batch_size, seq_length])
istate = cell.zero_state(batch_size, tf.float32)
# Weigths
with tf.variable_scope('rnnlm'):
softmax_w = tf.get_variable("softmax_w", [rnn_size, vocab_size])
softmax_b = tf.get_variable("softmax_b", [vocab_size])
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [vocab_size, rnn_size])
inputs = tf.split(1, seq_length, tf.nn.embedding_lookup(embedding, input_data))
inputs = [tf.squeeze(_input, [1]) for _input in inputs]
# Output
def loop(prev, _):
prev = tf.nn.xw_plus_b(prev, softmax_w, softmax_b)
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
return tf.nn.embedding_lookup(embedding, prev_symbol)
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol).
outputs, last_state = tf.nn.seq2seq.rnn_decoder(inputs, istate, cell
, loop_function=None, scope='rnnlm')
output = tf.reshape(tf.concat(1, outputs), [-1, rnn_size])
logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
probs = tf.nn.softmax(logits)
# Loss
loss = tf.nn.seq2seq.sequence_loss_by_example([logits], # Input
[tf.reshape(targets, [-1])], # Target
[tf.ones([batch_size * seq_length])], # Weight
vocab_size)
# Optimizer
cost = tf.reduce_sum(loss) / batch_size / seq_length
final_state = last_state
lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
_optm = tf.train.AdamOptimizer(lr)
optm = _optm.apply_gradients(zip(grads, tvars))
print ("Network Ready")
# Train the model!
num_epochs = 50
save_every = 500
learning_rate = 0.002
decay_rate = 0.97
sess = tf.Session()
sess.run(tf.initialize_all_variables())
summary_writer = tf.train.SummaryWriter(save_dir, graph=sess.graph)
saver = tf.train.Saver(tf.all_variables())
init_time = time.time()
for epoch in range(num_epochs):
# Learning rate scheduling
sess.run(tf.assign(lr, learning_rate * (decay_rate ** epoch)))
state = sess.run(istate)
batchidx = 0
for iteration in range(num_batches):
start_time = time.time()
randbatchidx = np.random.randint(num_batches)
xbatch = xbatches[batchidx]
ybatch = ybatches[batchidx]
batchidx = batchidx + 1
# Note that, num_batches = len(xbatches)
# Train!
train_loss, state, _ = sess.run([cost, final_state, optm]
, feed_dict={input_data: xbatch, targets: ybatch, istate: state})
total_iter = epoch*num_batches + iteration
end_time = time.time();
duration = end_time - start_time
if total_iter % 100 == 0:
print ("[%d/%d] cost: %.4f / Each batch learning took %.4f sec"
% (total_iter, num_epochs*num_batches, train_loss, duration))
if total_iter % save_every == 0:
ckpt_path = os.path.join(save_dir, 'model.ckpt')
saver.save(sess, ckpt_path, global_step = total_iter)
# Save network!
print("model saved to '%s'" % (ckpt_path))
print ("Done!! It took %.4f second. " %(time.time() - init_time))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Now, we are ready to make our RNN model with seq2seq
Step3: Run the command line
|
6,840
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pylab
import matplotlib.patches as patches
import matplotlib.path as path
from ipywidgets import interact
import numpy
import sys
import random
import itertools
import time
import scipy.special
import math
pylab.rcParams['figure.figsize'] = 16, 10
pylab.rcParams['image.cmap'] = 'viridis'
try:
sys.path.append('../..')
    from crocodile.synthesis import *
from util.visualize import *
print("Crocodile mode")
except ImportError:
print("Stand-alone mode")
# Convolution and FFT helpers
def conv(a, b): return ifft(fft(a) * fft(b))
def coordinates(N):
return numpy.fft.fftshift(numpy.fft.fftfreq(N))
def fft(a):
if len(a.shape) == 1: return numpy.fft.fftshift(numpy.fft.fft(numpy.fft.ifftshift(a)))
elif len(a.shape) == 2: return numpy.fft.fftshift(numpy.fft.fft2(numpy.fft.ifftshift(a)))
def ifft(a):
if len(a.shape) == 1: return numpy.fft.fftshift(numpy.fft.ifft(numpy.fft.ifftshift(a)))
elif len(a.shape) == 2: return numpy.fft.fftshift(numpy.fft.ifft2(numpy.fft.ifftshift(a)))
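# Editor's sketch (not in the original notebook): a quick round-trip check that the shifted
# FFT/IFFT helpers above invert each other for an even-length vector.
_test_vec = numpy.random.rand(8)
assert numpy.allclose(ifft(fft(_test_vec)), _test_vec)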
def pad_mid(a, N):
N0 = a.shape[0]
assert N >= N0
return numpy.pad(a, len(a.shape) * [(N//2-N0//2, (N+1)//2-(N0+1)//2)], mode='constant', constant_values=0.0)
def extract_mid(a, N):
assert N <= a.shape[0]
cx = a.shape[0] // 2
s = N // 2
if N % 2 == 0:
return a[len(a.shape) * [slice(cx - s, cx + s)]]
else:
return a[len(a.shape) * [slice(cx - s, cx + s + 1)]]
def anti_aliasing_function(shape, m, c):
if len(numpy.array(shape).shape) == 0:
mult = 2 - 1/shape/4
return scipy.special.pro_ang1(m, m, c, mult*coordinates(shape))[0]
return numpy.outer(anti_aliasing_function(shape[0], m, c),
anti_aliasing_function(shape[1], m, c))
def coordinates2(N):
N2 = N // 2
if N % 2 == 0:
return numpy.mgrid[-N2:N2, -N2:N2][::-1] / N
else:
return numpy.mgrid[-N2:N2+1, -N2:N2+1][::-1] / N
def _show(a, name, scale, axes):
size = a.shape[0]
if size % 2 == 0:
low,high = -0.5, 0.5 * (size - 2) / size
else:
low,high = -0.5 * (size - 1) / size, 0.5 * (size - 1) / size
low = (low - 1/size/2) * scale
high = (high - 1/size/2) * scale
cax=axes.imshow(a, extent=(low,high,low,high)); axes.set_title(name);
axes.figure.colorbar(cax,shrink=.4,pad=0.025)
def show_grid(grid, name, theta, axes):
return _show(grid, name, theta, axes)
def show_image(img, name, theta, axes):
return _show(img, name, img.shape[0] / theta, axes)
# Helper for marking ranges in a graph
def mark_range(lbl, x0, x1, y0=None, y1=None, ax=None):
if ax is None: ax = pylab.gca()
if y0 is None: y0 = ax.get_ylim()[1]
if y1 is None: y1 = ax.get_ylim()[0]
wdt = ax.get_xlim()[1] - ax.get_xlim()[0]
ax.add_patch(patches.PathPatch(patches.Path([(x0,y0), (x0,y1)]), linestyle="dashed"))
ax.add_patch(patches.PathPatch(patches.Path([(x1,y0), (x1,y1)]), linestyle="dashed"))
if pylab.gca().get_yscale() == 'linear':
lbl_y = (y0*7+y1) / 8
else: # Some type of log scale
lbl_y = (y0**7*y1)**(1/8)
ax.annotate(lbl, (x1+wdt/200, lbl_y))
N = 2000
x = coordinates(N); fx = N * x
ticks = coordinates(10); fticks = N * coordinates(12)
G = numpy.random.rand(N)-0.5
pylab.rcParams['figure.figsize'] = 16, 4
pylab.plot(x, G); pylab.legend(["G"]); pylab.xticks(ticks); pylab.show()
pylab.plot(fx, fft(G).real); pylab.legend(["F[G]"]); pylab.xticks(fticks); pylab.show()
x0 = 0.2; xA = 200/N
yB = N/10
A = numpy.where(numpy.abs(x-x0) <= xA, 1, 0)
B = ifft(numpy.where(numpy.abs(fx) <= yB, 1, 0))
pylab.plot(x, A, x, B.real, x, A*G); pylab.legend(["A", "B", "AG"]);
pylab.xticks(ticks); mark_range("xA", x0-xA,x0+xA); pylab.show();
pylab.plot(fx, fft(A).real, fx, fft(B).real,
fx, fft(conv(B, G)).real); pylab.legend(["F[A]", "F[B]", "F[B*G]"]);
pylab.ylim(-20,30); pylab.xticks(fticks); mark_range("yB",-yB,yB); pylab.show();
pylab.plot(x, conv(B, A*G).real); pylab.legend(["B*AG"]);
pylab.xticks(ticks); mark_range("xA", x0-xA,x0+xA); pylab.show();
pylab.plot(fx, fft(A*conv(B,G)).real); pylab.legend(["F[A(B*G)]"]);
pylab.xticks(fticks); mark_range("yB", -yB,yB); pylab.show();
alpha = 0; xN = 25 / N; yN = yB
n = ifft(pad_mid(anti_aliasing_function(int(yB*2), alpha, 2*numpy.pi*yN*xN), N)).real
pylab.semilogy(x, numpy.abs(n)); pylab.legend(["n"]);
pylab.xticks(ticks); mark_range("$x_n$", -xN,xN); pylab.show();
pylab.semilogy(fx, numpy.abs(fft(n))); pylab.legend(["F[n]"]);
pylab.xticks(fticks); mark_range("$y_n=y_B$", -yN,yN); pylab.show();
xM = xA + xN
m = numpy.where(numpy.abs(x-x0) <= xM, 1, 0)
ideal = conv(n, A*G)
approx = m * conv(n, A*G)
error = (1-m) * conv(n, A*G)
pylab.plot(x, ideal.real, x, approx.real); pylab.legend(["n*AG", "m(n*AG)"]);
pylab.xticks(ticks); mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM); pylab.show();
pylab.semilogy(x, numpy.abs(error)); pylab.legend(["(1-m)(n*AG)"]);
pylab.xticks(ticks); mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM); pylab.show();
pylab.semilogy(fx, numpy.abs(fft(error))); pylab.legend(["F[(1-m)(n*AG)]"]);
pylab.xticks(fticks); mark_range("$y_n=y_B$", -yN,yN); pylab.show();
print("RMSE:", numpy.sqrt(numpy.mean(numpy.abs(error)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error))**2)),")")
b = ifft(pad_mid(1/anti_aliasing_function(int(yB*2), alpha, 2*numpy.pi*yN*xN), N)).real
pylab.plot(x, b); pylab.legend(["b"]); pylab.xticks(ticks); pylab.show();
pylab.semilogy(fx, numpy.abs(fft(b)), fx, numpy.abs(fft(conv(b,n)))); pylab.legend(["F[b]", "F[b*n]"]);
pylab.xticks(fticks); mark_range("$y_n=y_B$", -yN,yN); pylab.show();
ideal = conv(B, A*G)
approx = conv(b, m * conv(n, A*G))
error = approx - ideal
pylab.plot(x,ideal.real, x, approx.real); pylab.legend(["B*AG", "b*m(n*AG)"]);
pylab.xticks(ticks); mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM); pylab.show();
pylab.semilogy(x, numpy.abs(error)); pylab.legend(["b*(1-m)(n*AG)"]);
pylab.xticks(ticks); mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM); pylab.show();
pylab.semilogy(fx, numpy.abs(fft(error))); pylab.legend(["F[b*(1-m)(n*AG)]"]);
pylab.xticks(fticks); mark_range("$y_n=y_B$", -yN,yN); pylab.show();
print("RMSE:", numpy.sqrt(numpy.mean(numpy.abs(error)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error))**2)),")")
alpha = 0; yN = yB + N * 0.02
pswf = anti_aliasing_function(int(yN*2), alpha, 2*numpy.pi*yN*xN)
n = ifft(pad_mid(pswf, N)).real
b = ifft(pad_mid(1/extract_mid(pswf, int(yB*2)), N)).real
error1 = (1-m) * conv(n, A*G)
error2 = conv(b, (1-m) * conv(n, A*G))
pylab.semilogy(fx, numpy.abs(fft(n)), fx, numpy.abs(fft(b)), fx, numpy.abs(fft(conv(b,n))));
pylab.legend(["F[n]","F[b]", "F[b*n]"]);
pylab.xticks(fticks); mark_range("$y_B$", -yB,yB); mark_range("$y_n$", -yN,yN); pylab.show();
pylab.semilogy(fx, numpy.abs(fft(error1)), fx, numpy.abs(fft(error2))); pylab.legend(["F[(1-m)(n*AG)]", "F[b*(1-m)(n*AG)]"]);
pylab.xticks(fticks); mark_range("$y_B$", -yB,yB); mark_range("$y_n$", -yN,yN); pylab.show();
print("RMSE (w/o b):", numpy.sqrt(numpy.mean(numpy.abs(error1)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error1))**2)),")")
print("RMSE:", numpy.sqrt(numpy.mean(numpy.abs(error2)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error2))**2)),")")
selM = (numpy.abs(x-x0) <= xM + 1e-13)
def pad_by_sel(sel, x): xp = numpy.zeros(len(sel), dtype=x.dtype); xp[sel] = x; return xp
ideal = conv(n, A*G)
approx = m * conv(n, A*G)
approx_r = pad_by_sel(selM, conv(n[numpy.abs(x) <= xM], (A*G)[selM]))
error1 = approx - ideal
error1_r = approx_r - ideal
pylab.plot(x[selM], ideal[selM].real, x[selM], approx_r[selM].real); pylab.legend(["n*AG", "[r] m(n*AG)"]);
mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM); pylab.show();
pylab.semilogy(fx[selM], numpy.abs(error1_r[selM])); pylab.legend(["[r] (1-m)(n*AG)"]); pylab.show();
pylab.semilogy(fx, numpy.abs(fft(error1)), fx, numpy.abs(fft(error1_r))); pylab.legend(["F[(1-m)(n*AG)]", "[r] F[(1-m)(n*AG)]"]);
mark_range("$y_n$", -yN,yN); mark_range("$y_B$", -yB,yB); pylab.show();
print("RMSE (w/o b):", numpy.sqrt(numpy.mean(numpy.abs(error1)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error1))**2)),")")
print("RMSE (w/o b, reduced):", numpy.sqrt(numpy.mean(numpy.abs(error1_r)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error1_r))**2)),")")
fx_r = N * coordinates(len(approx_r[selM]))
selN = numpy.abs(fx_r) <= yN
approx_core = ifft(fft(approx_r[selM])[selN])
approx_r2 = pad_by_sel(selM, ifft(pad_by_sel(selN, fft(approx_core))))
error1_r2 = ideal - approx_r2
pylab.semilogy(fx, numpy.abs(fft(ideal)),fx_r[selN], numpy.abs(fft(approx_core)));
mark_range("$y_n$", -yN,yN); mark_range("$y_B$", -yB,yB);
pylab.legend(["F[n*AG]", "[r²] F[m(n*AG)]"]); pylab.show();
pylab.plot(x[selM], ideal[selM].real, x[selM], approx_r2[selM].real); pylab.legend(["n*AG", "[r²] m(n*AG)"]);
mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM); pylab.show();
pylab.semilogy(x[selM], numpy.abs(error1_r[selM]), x[selM], numpy.abs(error1_r2[selM]));
pylab.legend(["[r] (1-m)(n*AG)", "[r²] (1-m)(n*AG)"]); pylab.show();
pylab.semilogy(fx, numpy.abs(fft(error1)), fx, numpy.abs(fft(error1_r)), fx, numpy.abs(fft(error1_r2)));
pylab.legend(["F[(1-m)(n*AG)]", "[r] F[(1-m)(n*AG)]", "[r²] F[(1-m)(n*AG)]"]);
mark_range("$y_n$", -yN,yN); mark_range("$y_B$", -yB,yB); pylab.show();
print("RMSE (w/o b):", numpy.sqrt(numpy.mean(numpy.abs(error1)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error1))**2)),")")
print("RMSE (w/o b, reduced):", numpy.sqrt(numpy.mean(numpy.abs(error1_r)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error1_r))**2)),")")
print("RMSE (w/o b, twice reduced):", numpy.sqrt(numpy.mean(numpy.abs(error1_r2)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error1_r2))**2)),")")
pylab.semilogy(x, numpy.abs(conv(b,error1_r)), x, numpy.abs(conv(b,error1_r2)));
mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM)
pylab.legend(["[r] b*(1-m)(n*AG)", "[r²] b*(1-m)(n*AG)"]); pylab.show();
pylab.semilogy(fx, numpy.abs(fft(conv(b,error1))), fx, numpy.abs(fft(conv(b,error1_r))), fx, numpy.abs(fft(conv(b,error1_r2))));
pylab.legend(["F[b*(1-m)(n*AG)]", "[r] F[b*(1-m)(n*AG)]", "[r²] F[b*(1-m)(n*AG)]"]);
mark_range("$y_n$", -yN,yN); mark_range("$y_B$", -yB,yB); pylab.show();
print("RMSE:", numpy.sqrt(numpy.mean(numpy.abs(conv(b,error1))**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(conv(b,error1)))**2)),")")
print("RMSE (reduced):", numpy.sqrt(numpy.mean(numpy.abs(conv(b,error1_r))**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(conv(b,error1_r)))**2)),")")
print("RMSE (twice reduced):", numpy.sqrt(numpy.mean(numpy.abs(conv(b,error1_r2))**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(conv(b,error1_r2)))**2)),")")
Gp = conv(b, G)
ideal = A * conv(n, Gp)
approx = A * conv(n, m * Gp)
error = A * conv(n, (1-m) * Gp)
pylab.plot(x, ideal.real, x, approx.real); pylab.legend(["A(n*G')", "A(n*mG')"]);
pylab.xticks(ticks); mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM); pylab.show();
pylab.semilogy(x, numpy.abs(error)); pylab.legend(["A(n*(1-m)G')"]); #pylab.ylim(1e-12,1e-7)
pylab.xticks(ticks); mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM); pylab.show();
pylab.semilogy(fx, numpy.abs(fft(error))); pylab.legend(["F[n*(1-m)G']"]);
pylab.xticks(fticks); mark_range("$y_n$", -yN,yN); mark_range("$y_B$", -yB,yB); pylab.show();
print("RMSE:", numpy.sqrt(numpy.mean(numpy.abs(error)**2)), "(image:",numpy.sqrt(numpy.mean(numpy.abs(fft(error))**2)),")")
approx_r = conv(n[numpy.abs(x) <= xM], Gp[selM])
pylab.plot(x, conv(n, Gp).real, x, conv(n, m * Gp).real, x[selM], approx_r.real);
pylab.legend(["n*G'", "n*mG'", "[r]n*mG'"]); pylab.ylim(-1,1)
pylab.xticks(ticks); mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM); pylab.show();
print("Samples: %d (%d*%d=%d)" % (N, 1, N, N))
print("Samples (reduced): %d (%.2f*%d=%d, limit %.2f*%d=%d)" % (numpy.sum(selM), 2*xM,N,2*xM*N, 2*xA,N,2*xA*N, ))
print("Samples (twice reduced): %d (%.2f*%d=%d, limit %.2f*%d=%d)" %
(numpy.sum(selN), 2*xM,2*yN,4*xM*yN, 2*xA,2*yB,4*xA*yB))
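# Analytic cost/accuracy model for the scheme: overhead_approx estimates the sample
# overhead relative to the information-theoretic limit, while error_approx predicts the
# achievable error from the PSWF parameters (yB, yN, xN) and kernel order alpha.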
def overhead_approx(yB, yN, xA, xN):
return numpy.ceil(4*(xA+xN)*yN) / numpy.ceil(4*xA*yB)
def error_approx(yB, yN, xN, alpha=0, dim=1, hexagon=False):
# gridding error
assert yB < yN
pswf = anti_aliasing_function(int(yN)*2, alpha, 2*numpy.pi*yN*xN)
pswf /= numpy.prod(numpy.arange(2*alpha-1,0,-2, dtype=float)) # double factorial
grid_error = numpy.abs(numpy.sum(pswf[::2] - pswf[1::2]))
# correction error
b_error = numpy.abs(pswf[int(yN) + int(yB)])
if dim >= 2 and hexagon:
b_error *= numpy.abs(pswf[int(yN) + int(yB/2)])**(dim-1)
else:
b_error **= dim
return numpy.abs(grid_error) / (2*xM) / b_error
print("Predicted error:", error_approx(yB, yN, xN))
print("Observed worst case:", numpy.max(numpy.abs(fft(conv(b,error1_r2)))))
yNs = 2**numpy.arange(5,20); yBs = yNs*yB/yN; xNs = xN*yN/yNs
errs_approx = numpy.vectorize(error_approx)(yBs, yNs, xNs)
pylab.loglog(yBs, errs_approx); pylab.xlabel('$y_B$'); pylab.show()
xAs = numpy.hstack([numpy.arange(0.01,0.05,0.01), numpy.arange(0.05,0.2,0.05)])
for xA_ in xAs:
pylab.semilogx(yNs, numpy.vectorize(overhead_approx)(yNs*0.75, yNs, xA_, xN*yN/yNs));
pylab.legend(["xA=%.2f" % xA for xA in xAs]); pylab.show()
ov = 1.5
pylab.rcParams['figure.figsize'] = 16, 4
for yN_ in [ 256, 2048]:
xNyNs = numpy.hstack([numpy.arange(0.125, 1, 0.125), numpy.arange(1, 5, 1), numpy.arange(5, 6, 0.125), numpy.arange(6,7,0.25)])
xNs = xNyNs / yN_
for xA_ in xAs:
yBs = (yN_ + xNyNs / xA_) / ov
sel = yBs < yN_
if numpy.sum(sel) == 0: continue
errs_approx = numpy.vectorize(error_approx)(yBs[sel], yN_, xNs[sel], dim=2)
pylab.semilogy(xNyNs[sel], errs_approx)
pylab.ylim(pylab.gca().get_ylim()[0], 1); pylab.xlabel("$x_ny_n$")
pylab.legend(["$x_A=%.2f$" % xA for xA in xAs]); pylab.title("$o=%.1f, y_N=%.1f$" % (ov, yN_)); pylab.show()
yP = yN + yB/2
IIIp_d = numpy.zeros(N)
for i in range(-N // int(yP) // 4+1, (N-1) // int(yP) // 4+1):
IIIp_d[N//2+2*int(yP)*i] = 1
pylab.plot(fx, IIIp_d, fx, fft(m).real, fx, fft(m).real - conv(fft(m), IIIp_d).real)
pylab.legend(["III", "Fm", "Fm-Fm*III"], loc=2)
pylab.ylim(-10,10)
mark_range("yN", -yN, yN); mark_range("yB", -yB, yB); mark_range("yP", -yN-yB, yN+yB); mark_range("2yP", -2*yP, 2*yP)
xM = xA + 2 * xN
mp = conv(m, n).real
pylab.semilogy(x, numpy.maximum(1e-15, numpy.abs(m)));
pylab.semilogy(x, numpy.abs(mp));
pylab.ylim((1e-13, 1e1)); pylab.legend(["m", "m*n"])
mark_range("$x_0+x_A+2x_N$", x0-xM, x0+xM); mark_range("$x_0-x_A-x_N$", x0+xA+xN, x0-xA-xN);
pylab.semilogy(fx, IIIp_d, fx, numpy.abs(fft(mp)), fx, numpy.abs(fft(mp) - conv(fft(mp), IIIp_d)))
mark_range("$y_N$", -yN, yN); mark_range("$y_B$", -yB, yB); mark_range("$y_P$", -yP, yP)
mark_range("$2y_P$", -2*yP, 2*yP); mark_range("$y_B+y_N$\n$=2y_P-y_N$", -yB-yN, yB+yN)
pylab.legend(["III", "Fm'", "Fm'-Fm'*III"], loc=2);
pylab.semilogy(fx, numpy.abs(conv(fft(b) * fft(G), fft(mp) - conv(fft(mp), IIIp_d))),
fx, numpy.abs(fft(n) * conv(fft(b) * fft(G), fft(mp) - conv(fft(mp), IIIp_d))))
mark_range("$y_N$", -yN, yN); mark_range("$y_B$", -yB, yB); mark_range("yP", -yP, yP)
pylab.legend(["$FbFG*(Fm-Fm*III)$", "$Fn(FbFG*(Fm-Fm*III$))"]);
def red_2yP(xs): return extract_mid(xs, int(2*yP))
ref = fft(n) * conv(fft(b) * fft(G), fft(mp))
reduced = red_2yP(fft(n)) * conv(red_2yP(fft(b) * fft(G)), red_2yP(fft(mp)))
pylab.semilogy(red_2yP(fx), numpy.abs(reduced - red_2yP(ref)));
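# End-to-end check: split G into nsubgrid contiguous sub-grids (grid space) and its FFT
# into nfacet facets (image space), then reconstruct every sub-grid from the facet data.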
nsubgrid = int(1 / (2*xA)); subgrid_size = int(N * 2*xA)
assert nsubgrid * subgrid_size == N
nfacet = int(N / (2*yB)); facet_size = int(2*yB)
assert nfacet * facet_size == N
print(nsubgrid,"subgrids,",nfacet,"facets")
subgrid = numpy.empty((nsubgrid, subgrid_size), dtype=complex)
facet = numpy.empty((nfacet, facet_size), dtype=complex)
for i in range(nsubgrid):
subgrid[i] = extract_mid(numpy.roll(G, -i * subgrid_size), subgrid_size)
FG = fft(G)
for j in range(nfacet):
facet[j] = extract_mid(numpy.roll(FG, -j * facet_size), facet_size)
yB_size = int(yB*2)
yN_size = int(yN*2)
# Find yP that allows us to align subgrid masks easily (brute force!)
yP_size = int(yP*2)+1
while numpy.abs(yP_size * 2*xA - int(yP_size * 2*xA)) >= 1e-13 or \
numpy.abs(yP_size * 2*xM - int(yP_size * 2*xM)) >= 1e-13:
yP_size+=1
xM_yP_size = int(2*xM*yP_size)
xMxN_yP_size = int(2*(xM+xN)*yP_size)
xMxN_yN_size = int(2*(xM+xN)*yN_size)
pswf = anti_aliasing_function(yN_size, alpha, 2*numpy.pi*yN*xN).real
Fb = 1/extract_mid(pswf, yB_size)
b = ifft(pad_mid(Fb, N))
n = ifft(pad_mid(pswf, N))
Fn = extract_mid(fft(n), yN_size)
FBjFj = numpy.empty((nfacet, yP_size), dtype=complex)
for j in range(nfacet):
FBjFj[j] = pad_mid(facet[j] * Fb, yP_size)
facet_m0 = conv(n, pad_mid(numpy.ones(int(N*xM*2)), N))
facet_m0_trunc = ifft(extract_mid(fft(facet_m0), yP_size))
pylab.semilogy(coordinates(yP_size), numpy.abs(facet_m0_trunc));
mark_range("$x_M$", xM, -xM); mark_range("$x_M+x_N$", -xM-xN, xM+xN);
MiBjFj = numpy.empty((nsubgrid, nfacet, yP_size), dtype=complex)
assert numpy.abs(yP_size * 2*xA - int(yP_size * 2*xA)) < 1e-13
for i in range(nsubgrid):
for j in range(nfacet):
MiBjFj[i,j] = facet_m0_trunc * numpy.roll(ifft(FBjFj[j]), -i*int(yP_size * 2*xA))
Fn = extract_mid(fft(n), yN_size)
NjMiBjFj = numpy.empty((nsubgrid, nfacet, yN_size), dtype=complex)
for i in range(nsubgrid):
for j in range(nfacet):
NjMiBjFj[i,j] = Fn * extract_mid(fft(MiBjFj[i,j]), yN_size) * yP_size / N
fig = pylab.figure(figsize=(16, 8))
ax1, ax2 = fig.add_subplot(211), fig.add_subplot(212)
for i in range(nsubgrid):
Gr = numpy.roll(G, -i * subgrid_size)
for j in range(nfacet):
Grr = ifft(numpy.roll(fft(Gr), -j * facet_size))
ax1.semilogy(yN_size*coordinates(yN_size),
numpy.abs(extract_mid(fft(conv(n, facet_m0 * conv(b, Grr))), yN_size)
- NjMiBjFj[i,j]))
ax2.semilogy(x, numpy.abs(ifft(pad_mid(NjMiBjFj[i,j], N))));
ax1.set_title("Error compared with $n_j * m_i(B_j * G)$")
mark_range("yN", -yN, yN,ax=ax1); mark_range("yB", -yB, yB,ax=ax1);
ax2.set_title("Signal left in grid space")
mark_range("xA", -xA, xA, ax=ax2); mark_range("xM", -xM, xM, ax=ax2); mark_range("xN+xN", -xM-xN, xM+xN, ax=ax2)
assert(numpy.abs(int(yN * xM) - yN * xM) < 1e-13)
assert(numpy.abs(int(1/2/xM) - 1/2/xM) < 1e-13)
xM_yN_size = int(xM*2*yN*2)
RNjMiBjFj = numpy.empty((nsubgrid, nfacet, xM_yN_size), dtype=complex)
RNjMiBjFj[:,:] = NjMiBjFj[:,:,::int(1/2/xM)]
print("Split", nfacet,"*",facet_size, "=", N, "data points into",
nfacet,'*',nsubgrid,'*',xM_yN_size,'=',nsubgrid*nfacet*xM_yN_size, ", overhead", nsubgrid*xM_yN_size/facet_size-1)
for i,j in itertools.product(range(nsubgrid), range(nfacet)):
Grr = ifft(numpy.roll(fft(numpy.roll(G, -i * subgrid_size)), -j * facet_size))
pylab.semilogy(xM*2*coordinates(int(xM*2*N)),
numpy.abs( extract_mid(facet_m0 * conv(conv(n,b), Grr), int(xM*2*N)) -
ifft(pad_mid(RNjMiBjFj[i,j], int(xM*2*N)))))
pylab.title("Error compared with $m_i (B_j * G)$")
mark_range("xA", -xA, xA); mark_range("xM", -xM, xM)
fig = pylab.figure(figsize=(16, 8))
ax1, ax2 = fig.add_subplot(211), fig.add_subplot(212)
err_sum = 0
for i in range(nsubgrid):
approx = numpy.zeros(int(xM*2*N), dtype=complex)
for j in range(nfacet):
approx += numpy.roll(pad_mid(RNjMiBjFj[i,j], int(xM*2*N)), j * int(xM*2*yB*2))
approx = extract_mid(ifft(approx), int(xA*2*N))
ax1.semilogy(xA*2*coordinates(subgrid_size), numpy.abs( approx - subgrid[i] ))
ax2.semilogy(N*coordinates(subgrid_size), numpy.abs( fft(approx - subgrid[i]) ))
err_sum += numpy.abs(approx - subgrid[i])**2
mark_range("xA", -xA, xA, ax=ax1); ax1.set_title("Error compared with $S_i = A_i G$")
mark_range("yB", -yB, yB, ax=ax2); mark_range("yN", -yN, yN, ax=ax2);
pylab.show()
print("MRSE:", numpy.sqrt(numpy.mean(err_sum)))
seed = numpy.random.randint(2**31)
@interact(N=(0,8192),
x0=(-0.5,0.5,0.1), xA=(0,0.5,0.01), xN=(0,0.5,0.01), xM=(0,0.5,0.01),
yN=(0,1024,25), yB=(0,1024,25), alpha=(0,20))
def test_it(N=N, x0=x0, xA=xA, xN=xN, xM=xM, yN=yN, yB=yB, alpha=alpha):
x = coordinates(N); fx = N * x
G = numpy.random.RandomState(seed).rand(N) - .5
A = numpy.roll(pad_mid(numpy.ones(int(2*xA*N)), N), int(x0*N))
m = numpy.roll(pad_mid(numpy.ones(int(2*xM*N)), N), int(x0*N))
selM = (m == 1)
selM0 = pad_mid(numpy.ones(int(2*xM*N),dtype=bool), N)
selN = pad_mid(numpy.ones(int(2*xM*2*yN),dtype=bool), numpy.sum(selM0))
pswf = anti_aliasing_function(int(yN*2), alpha, 2*numpy.pi*yN*xN)
pswf /= numpy.prod(numpy.arange(2*alpha-1,0,-2, dtype=float)) # double factorial
n = ifft(pad_mid(pswf, N)).real
m = conv(n,m).real
b = ifft(pad_mid(1/extract_mid(pswf, int(yB*2)), N)).real
Gp = conv(b, G)
ideal = A * conv(n, Gp)
approx = A * conv(n, m * Gp)
error = A * conv(n, (1-m) * Gp)
approx_r = A[selM] * conv(n[selM0], Gp[selM])
error_r = approx_r - ideal[selM]
approx_core = ifft(fft(n[selM0])[selN] * fft(Gp[selM])[selN])
approx_r2 = A[selM] * ifft(pad_by_sel(selN, fft(approx_core)))
error_r2 = approx_r2 - ideal[selM]
print("PSWF parameter:", 2*numpy.pi*xN*yN)
print("Worst error:", numpy.max(numpy.abs(error_r2)), "(image:", numpy.max(numpy.abs(fft(error))),
", predicted:", error_approx(yB, yN, xN, alpha=alpha),")")
print("RMSE:", numpy.sqrt(numpy.mean(numpy.abs(error)**2)), "(reduced:", numpy.sqrt(numpy.mean(numpy.abs(error_r)**2)),
", +downsample:", numpy.sqrt(numpy.mean(numpy.abs(error_r2)**2)), ")")
print("RMSE image:", numpy.sqrt(numpy.mean(numpy.abs(fft(error))**2)), "(reduced:", numpy.sqrt(numpy.mean(numpy.abs(fft(error_r))**2)),
", +downsample:", numpy.sqrt(numpy.mean(numpy.abs(fft(error_r2))**2)), ")")
print("Samples:", len(approx_core), "(",2*xM, "*", 2*yN,"=",4*xM*yN, ", overhead: %.2f)" % (xM*yN/xA/yB))
ticks = coordinates(10)
pylab.figure(figsize=(16, 18))
pylab.subplot(4,1,1); pylab.title("Input");
pylab.plot(x, A, x, conv(b,n).real, x, n, x, G, x, m); pylab.legend(["A", "B", "n", "G", "m"]); pylab.xticks(ticks);
mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM)
pylab.subplot(4,1,2); pylab.title("Output")
pylab.plot(x, conv(n, Gp).real, x, conv(n, m * Gp).real, x[selM], conv(n[selM0], Gp[selM]).real);
pylab.legend(["n*(b*G)", "n*m(b*G)", "[r]n*m(b*G)"]); pylab.ylim((-0.6,0.6)); pylab.xticks(ticks)
mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM);
pylab.subplot(4,1,3); pylab.title("Errors (Grid space)");
pylab.semilogy(x, numpy.abs(n), x, numpy.abs(conv(n, (1-m) * Gp)), x[selM], numpy.abs(error_r), x[selM], numpy.abs(error_r2));
pylab.legend(["n", "n*(1-m)(b*G)", "[r] A(n*(1-m)(b*G))", "[r+d] A(n*(1-m)(b*G))"]); pylab.xticks(ticks);
mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_n$", -xN,xN)
pylab.subplot(4,1,4); pylab.title("Errors (Image space)")
pylab.semilogy(fx, numpy.abs(fft(n)), fx, numpy.abs(fft(b)), fx, numpy.abs(fft(conv(n, m * Gp))), fx, numpy.abs(fft(error)),
N*coordinates(len(error_r)), numpy.abs(fft(conv(n[selM0], Gp[selM]))),
N*coordinates(len(error_r)), numpy.abs(fft(error_r)));
pylab.legend(["F[n]", "F[b]", "F[n*m(b*G)]", "F[A(n*(1-m)(b*G))]", "[r] F[n*m(b*G)]", "[r] F[A(n*(1-m)(b*G))]"]);
mark_range("$y_n$", -yN,yN); mark_range("$y_B$", -yB,yB);
pylab.xticks(N*ticks); pylab.show()
seed = numpy.random.randint(2**31)
@interact(N=(0,8192),
x0=(-0.5,0.5,0.1), xA=(0,0.5,0.01), xN=(0,0.5,0.01), xM=(0,0.5,0.01),
yN=(0,1024,25), yB=(0,1024,25), alpha=(0,20))
def test_it(N=N, x0=x0, xA=xA, xN=xN, xM=xM, yN=yN, yB=yB, alpha=alpha):
x = coordinates(N); fx = N * x
G = numpy.random.RandomState(seed).rand(N) - .5
A = numpy.roll(pad_mid(numpy.ones(int(2*xA*N)), N), int(x0*N))
m = numpy.roll(pad_mid(numpy.ones(int(2*xM*N)), N), int(x0*N))
selM = (m == 1)
selM0 = pad_mid(numpy.ones(int(2*xM*N),dtype=bool), N)
selN = pad_mid(numpy.ones(int(2*xM*2*yN),dtype=bool), numpy.sum(selM0))
pswf = anti_aliasing_function(int(yN*2), alpha, 2*numpy.pi*yN*xN)
pswf /= numpy.prod(numpy.arange(2*alpha-1,0,-2, dtype=float)) # double factorial
n = ifft(pad_mid(pswf, N)).real
b = ifft(pad_mid(1/extract_mid(pswf, int(yB*2)), N)).real
m = conv(n,m).real
ideal = conv(n, A*G)
approx = m * conv(n, A*G)
error = (1-m) * conv(n, A*G)
error_b = conv(b, error)
approx_r = conv(n[selM0], (A*G)[selM])
error = ideal - approx
error_r = ideal[selM] - approx_r
approx_core = ifft(fft(approx_r)[selN])
approx_r2 = ifft(pad_by_sel(selN, fft(approx_core)))
error_r2 = ideal[selM] - approx_r2
error_rb = conv(b, pad_by_sel(selM, error_r))
error_r2b = conv(b, pad_by_sel(selM, error_r2))
print("PSWF parameter:", 2*numpy.pi*xN*yN)
print("Worst error:", numpy.max(numpy.abs(error_r2b)), "(image:", numpy.max(numpy.abs(fft(error_r2b))),
", predicted:", error_approx(yB, yN, xN, alpha=alpha),")")
print("RMSE:", numpy.sqrt(numpy.mean(numpy.abs(error_b)**2)), "(reduced:", numpy.sqrt(numpy.mean(numpy.abs(error_rb)**2)),
", +downsample:", numpy.sqrt(numpy.mean(numpy.abs(error_r2b)**2)), ")")
print("RMSE image:", numpy.sqrt(numpy.mean(numpy.abs(fft(error_b))**2)), "(reduced:", numpy.sqrt(numpy.mean(numpy.abs(fft(error_rb))**2)),
", +downsample:", numpy.sqrt(numpy.mean(numpy.abs(fft(error_r2b))**2)), ")")
print("Samples:", len(approx_core), "(",2*xM, "*", 2*yN,"=",4*xM*yN, ", overhead: %.2f)" % (xM*yN/xA/yB))
# Input graph
ticks = coordinates(10)
pylab.figure(figsize=(16, 18))
pylab.subplot(4,1,1); pylab.title("Input");
pylab.plot(x, A, x, conv(b,n).real, x, n, x, G, x, m); pylab.legend(["A", "B", "n", "G", "m"]); pylab.xticks(ticks);
mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM)
# Output graph
pylab.subplot(4,1,2); pylab.title("Output");
pylab.plot(x, conv(b, ideal), x, conv(b,approx), x, conv(b,pad_by_sel(selM, approx_r2)))
pylab.ylim((-0.5,0.5)); pylab.legend(["B*AG", "b*m(n*AG)", "[r+d] b*m(n*AG)"]);
mark_range("$x_A$", x0-xA,x0+xA); mark_range("$x_m$", x0-xM,x0+xM)
# Error graph (image space)
pylab.subplot(4,1,3); pylab.title("Errors (Grid space)");
pylab.semilogy(x, numpy.abs(n), x, numpy.abs(error), x[selM], numpy.abs(error_r), x[selM], numpy.abs(error_r2), x, numpy.abs(error_r2b));
mark_range("$x_n$", -xN,xN); mark_range("$x_m$", x0-xM,x0+xM)
pylab.legend(["n","(1-m)(n*AG)","[r] (1-m)(n*AG)","[r+d] (1-m)(n*AG)","[r+d] b*(1-m)(n*AG)"]);
pylab.xticks(ticks);
# Error graph (frequency space)
pylab.subplot(4,1,4); pylab.title("Errors (Image space)")
pylab.semilogy(fx, numpy.abs(fft(n)), fx, numpy.abs(fft(b)), fx, numpy.abs(fft(ideal)), fx, numpy.abs(fft(error)),
N*coordinates(len(error_r)), numpy.abs(fft(error_r2)), fx, numpy.abs(fft(error_r2b)));
mark_range("$y_n$", -yN,yN); mark_range("$y_B$", -yB,yB);
pylab.legend(["n", "b", "n*AG", "(1-m)(n*AG)", "[r+d] (1-m)(n*AG)", "[r+d] b*(1-m)(n*AG)"]);
pylab.xticks(N * ticks);
pylab.show()
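# 2D case: the same reduction applied along both image axes; the b mask can optionally
# be restricted to a hexagonal support.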
seed = numpy.random.randint(2**31)
# Subgrid region marker, assuming size to be even
def grid_patch(size):
return patches.Rectangle((-(size+1)/theta/2, -(size+1)/theta/2), size/theta, size/theta, fill=False, linestyle="dashed")
def pad_by_sel_(shape, sel, x):
xp = numpy.zeros(shape, dtype=x.dtype); xp[sel] = x; return xp
@interact(N=(0,1024,128),
x0=(-0.5,0.5,0.1), y0=(-0.5,0.5,0.1), xA=(0,0.5,0.01), xN=(0,0.5,0.01), xM=(0,0.5,0.01),
yN=(0,1024,25), yB=(0,1024,25), alpha=(0,20))
def test_it(N=N, x0=x0, y0=-0.1, xA=xA, xN=xN, xM=xM, yN=yN, yB=yB, alpha=alpha, hexagon=False):
x,y = coordinates2(N)
G = numpy.random.RandomState(seed).rand(N, N) - .5
A = numpy.roll(pad_mid(numpy.ones((int(2*xA*N), int(2*xA*N))), N), (int(y0*N), int(x0*N)), (0,1))
m = numpy.roll(pad_mid(numpy.ones((int(2*xM*N), int(2*xM*N))), N), (int(y0*N), int(x0*N)), (0,1))
A = numpy.where((numpy.abs(x-x0) <= xA) & (numpy.abs(y-y0) <= xA), 1, 0)
m = numpy.where((numpy.abs(x-x0) <= xM) & (numpy.abs(y-y0) <= xM), 1, 0)
selM_1 = pad_mid(numpy.ones(int(2*xM*N), dtype=bool), N)
selM = numpy.ix_(numpy.roll(selM_1, int(y0*N)), numpy.roll(selM_1, int(x0*N)))
selM0 = numpy.ix_(selM_1, selM_1)
selN_1 = pad_mid(numpy.ones(int(2*xM*2*yN),dtype=bool), numpy.sum(selM_1))
selN = numpy.ix_(selN_1, selN_1)
pswf = anti_aliasing_function((int(yN*2),int(yN*2)), alpha, 2*numpy.pi*yN*xN)
pswf /= numpy.prod(numpy.arange(2*alpha-1,0,-2, dtype=float)) # double factorial
n = ifft(pad_mid(pswf, N)).real
b = pad_mid(1/extract_mid(pswf, int(yB*2)), N)
if hexagon: b[numpy.where(numpy.abs(N*x) > yB - numpy.abs(N*y) / 2)] = 0
b = ifft(b).real
m = conv(n,m).real
ideal = conv(n, A*G)
approx = m * conv(n, A*G)
error = (1-m) * conv(n, A*G)
error_b = conv(b, error)
approx_r = conv(n[selM0], (A*G)[selM])
error = ideal - approx
error_r = ideal[selM] - approx_r
approx_core = ifft(fft(approx_r)[selN])
approx_r2 = ifft(pad_by_sel_(approx_r.shape, selN, fft(approx_core)))
error_r2 = ideal[selM] - approx_r2
error_rb = conv(b, pad_by_sel_(ideal.shape, selM, error_r))
error_r2b = conv(b, pad_by_sel_(ideal.shape, selM, error_r2))
print("PSWF parameter:", 2*numpy.pi*xN*yN)
print("Worst error:", numpy.max(numpy.abs(error_r2b)), "(image:", numpy.max(numpy.abs(fft(error_r2b))),
", predicted:", error_approx(yB, yN, xN, alpha=alpha, dim=2, hexagon=hexagon),")")
print("RMSE:", numpy.sqrt(numpy.mean(numpy.abs(error_b)**2)), "(reduced:", numpy.sqrt(numpy.mean(numpy.abs(error_rb)**2)),
", +downsample:", numpy.sqrt(numpy.mean(numpy.abs(error_r2b)**2)), ")")
print("RMSE image:", numpy.sqrt(numpy.mean(numpy.abs(fft(error_b))**2)), "(reduced:", numpy.sqrt(numpy.mean(numpy.abs(fft(error_rb))**2)),
", +downsample:", numpy.sqrt(numpy.mean(numpy.abs(fft(error_r2b))**2)), ")")
print("Samples:", approx_core.shape[0] * approx_core.shape[1], "(",2*xM, "² *", 2*yN,"² =",(4*xM*yN)**2, ", overhead: %.2f)" % (xM*yN/xA/yB)**2)
fig = pylab.figure(figsize=(16,4))
show_grid(numpy.log(numpy.maximum(1e-20,numpy.abs(n))) / numpy.log(10), "n", N, axes=fig.add_subplot(121))
show_grid(numpy.log(numpy.maximum(1e-20,numpy.abs(b))) / numpy.log(10), "b", N, axes=fig.add_subplot(122))
fig = pylab.figure(figsize=(16,4))
show_image(numpy.log(numpy.maximum(1e-20,numpy.abs(fft(n)))) / numpy.log(10), "F[n]", N, axes=fig.add_subplot(121))
show_image(numpy.log(numpy.maximum(1e-20,numpy.abs(fft(b)))) / numpy.log(10), "F[b]", N, axes=fig.add_subplot(122))
fig = pylab.figure(figsize=(16,4))
show_grid(numpy.abs(conv(b,ideal)), "b*n*AG", N, axes=fig.add_subplot(121))
show_grid(numpy.abs(conv(b,pad_by_sel_(ideal.shape, selM, approx_r2))), "[r+d] b*m(n*AG)", N, axes=fig.add_subplot(122))
fig = pylab.figure(figsize=(16,4))
show_grid(numpy.log(numpy.maximum(1e-20,numpy.abs(error))) / numpy.log(10), "log_10 (1-m)(n*AG)", N, axes=fig.add_subplot(131))
show_grid(numpy.log(numpy.maximum(1e-20,numpy.abs(pad_by_sel_(ideal.shape, selM, error_r)))) / numpy.log(10), "[r] log_10 (1-m)(n*AG)", N, axes=fig.add_subplot(132))
show_grid(numpy.log(numpy.maximum(1e-20,numpy.abs(pad_by_sel_(ideal.shape, selM, error_r2)))) / numpy.log(10), "[r+d] log_10 (1-m)(n*AG)", N, axes=fig.add_subplot(133))
fig = pylab.figure(figsize=(16,6))
show_grid(numpy.log(numpy.maximum(1e-20,numpy.abs(fft(error)))) / numpy.log(10), "log_10 F[(1-m)(n*AG)]", N, axes=fig.add_subplot(121))
show_grid(numpy.log(numpy.maximum(1e-20,numpy.abs(fft(error_r2b)))) / numpy.log(10), "[r+d] log_10 F[b*(1-m)(n*AG)]", N, axes=fig.add_subplot(122))
pylab.show()
#show_image(b, "b", 1)
return
xs,ys = coordinates2(len(pswf))
pylab.rcParams['figure.figsize'] = 20, 16
pylab.contour(xs,ys, numpy.log(1/numpy.outer(pswf, pswf)) / numpy.log(10), levels=numpy.arange(0,12))
r = 0.4
pylab.gca().add_patch(patches.Circle((0,0), radius=r, fill=False))
pylab.gca().add_patch(patches.Polygon(r*numpy.transpose([numpy.cos(numpy.arange(6)/6*2*numpy.pi), numpy.sin(numpy.arange(6)/6*2*numpy.pi)]), True, fill=False))
pylab.colorbar()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem definition
Step2: Now let $A_i$, $B_j$ be grid and facet masks respectively, so $A_i(x) = 0$ iff $\left|x-x_i\right| > x_A$, $\sum_i A_i = 1$
Step3: Note that covering the entire number space with functions of finite support requires an infinite number of sub-grids and facets. However, in practice this does not really matter: due to finite sampling we are effectively looking at infinite (repeating) equivalence classes of facet/sub-grids anyway. Due to linearity this does not change the argument, so we will use the simpler representation.
Step4: After all, $A_i (B_j \ast G) \ne B_j \ast A_i G$. This has a direct impact on how compactly we can represent these
Step5: That is pretty close to what we are looking for
Step6: Close enough, this precision is more than enough for our purposes! However, we are not quite finished yet.
Step7: The function $b$ looks pretty wild in grid space, with a lot of high frequency. But that is to be expected to some degree, after all it "undoes" the well-behavedness of $n$. The second diagram shows that we indeed have $b_j\ast n_j = B_j$ as intended.
Step8: Looks okay-ish at first glance. However, let us measure the errors objectively
Step9: A lot worse than we had hoped!
Step10: Note that the base accuracy performance of the wave function has improved a bit simply because we have increased $y_n$ slightly. However, the important part is that we can make much better use of the results
Step11: Reducing the grid-size / image sampling rate has clearly not impacted the precision too much. Note that while the $(1-m)(n*AG)$ error term should be zero in the $x_m$ grid region, with the reduced size the errors get aliased back onto the subgrid. For our parameters this happens to cause an error "well" in the centre of the sub-grid.
Step12: As intended, $n_j\ast a_iG$ drops to zero past $y_N$. This is clearly true even after downsampling, so we can truncate the signal in image space as well. This does not impact signal reconstruction in any visible way.
Step13: The errors have not changed too much even though we have now significantly reduced the number of samples in both grid and image space. The downsampling operation eliminated the incidental "well" in grid space, but $n_j\ast a_iG \approx m_i (n_j \ast a_i G)$ holds just as it did before. This is what we were after!
Step14: And we made it out the other end. Seems the approximation is indeed robust against truncation and down-sampling as intended.
Step15: This is entirely dual to the original method -- we are doing everything backwards. Now $G$ needs to get convolved with $b_j$ right away, and we end with the $A_i$ mask cutting away a bit of grid space.
Step16: Reproduction of the original signal quickly deteriorates beyond the $x_A$ region. Especially note how $n\ast mG'$ tends to jump around violently beyond $x_A$. This is even worse for the reduced case, where the error aliases around the $x_m$ edges.
Step17: As expected, modulo some rounding of pixels at the edges. We are not actually that far away from the limits given by information theory. However, this is clearly for a very small data size, so what would happen if we started doing this for larger grids? Would we be able to retain the reached accuracy and efficiency?
Step18: This is clearly quite pessimistic even when comparing against the worst case, but it gives us a decent starting point.
Step19: At the same time, the "overhead" of our solution decreases slowly depending on the sub-grid size $x_A$, eventually approximating $y_N/y_B$. This is of course because the constant grid margin eventually stops mattering
Step20: So how much accuracy can we get for a certain overhead, optimally? If we settle on a certain overhead value $o$, we can calculate $y_n$ from a value of $x_ny_n$
Step21: Facet Split
Step22: The problem here is that $\mathcal Fm$ never approaches zero - it is a sinc function that doesn't fall off very much at all. Therefore we have $\mathcal F m_i(y) \ne \left[\operatorname{III}_{2y_P} \ast \mathcal F m_i\right](y)$ no matter how much we restrict the $y$ region.
Step23: However, now this also limits $m$ to $y_N$ in image space
Step24: Good starting point. We just need to figure out how far we can reduce the image size (here $y_P$) without impacting our ability to reconstruct the image. What we need is
Step25: This shows that by using the low-pass-filtered $m'$ we can indeed perform the entire computation using just $2y_P$ sample points
Step26: Note that we are only truncating terms here that we already know to fall to zero. However, the choice of $y_P$ is still quite subtle here, as we need padding to make sure that the convolutions work out in the correct way.
Step27: So let us start by reconstructing sub-grids from facets. First step is to convolve in $b_j$, derived from the PSWF. This is a cheap multiplication in image space. We then pad to $2y_P$ which yields us
Step28: Next we need to cut out the appropriate sub-grid for $m_i$ for the subgrid we want to construct (here
Step29: Note that we would clearly not construct them separately for a real pipeline, as they are simply shifted. Due to the truncation in frequency space these are not quite top-hat functions any more. These terms now get used to extract the sub-grid data from each facet
Step30: Next step is to multiply in $n_j$ in order to un-do the effects of $b$ and cut out the garbage between $y_N$ and $y_P$. This means we arrive at
Step31: Quick mid-point accuracy check against the approximation formula using full resolution. We should be looking at only rounding errors.
Step32: So our final step is to reduce the sampling rate (sub-grid size) to $x_M$. This is the step where we actually introduce the bulk of our error, as the "tail" regions outside $x_M$ get aliased in. As established before, this especially copies the $x_M+x_N$ region inside, which doesn't hurt because we are only interested in the centre $x_A$ part.
Step33: At this point, all that is left is to put together the sum
Step34: Note the pattern of the errors in image space - this is the position dependence of the accuracy pattern from the $b_j$ multiplication. As we stitch together 5 facets, this pattern repeats 5 times.
Step35: 2D case
|
6,841
|
<ASSISTANT_TASK:>
Python Code:
import os # to access system environment variables
import pandas as pd # data management
import seaborn as sns # for data visualization
import matplotlib.pyplot as plt # for plots
import statsmodels.formula.api as smf # for regression output
import datetime as dt # date information
from cassiopeia import riotapi # to access Riot Games API
from cassiopeia.type.core.common import LoadPolicy # to utilize delayed loading policy
# IPython command, puts plots in notebook
%matplotlib inline
print('Last Updated', dt.date.today())
riotapi.set_region("NA") # sets the region to North America (rip Dyrus)
key = os.environ["DEV_KEY"] # grabs my API key from my environment variables
riotapi.set_api_key(key) # my dev key is specific to my account
riotapi.set_load_policy(LoadPolicy.lazy) # lazy -> delays loading certain objects for improved time + data usage
challenger_league = riotapi.get_challenger()
challenger_league
challenger = [entry.summoner for entry in challenger_league]
challenger[0] #summoner.Summoner object of highest rank player in challenger
challenger[0].name
challenger[0].ranked_stats()[None]
challenger[0].ranked_stats()[None].to_json() #returns data in JSON format
df = pd.DataFrame()
for player in challenger:
stats = pd.read_json(player.ranked_stats()[None].to_json(), typ = 'series')
df = df.append(stats, ignore_index=True)
df.head()
df.shape #200 player entries, 56 field columns
df.columns
df.dtypes #all values are floats
df['averageAssists'] = df['totalAssists']/df['totalSessionsPlayed'] # will overwrite the 'averageAssists' field
df['averageKills'] = df['totalChampionKills']/df['totalSessionsPlayed']
df['averageDeaths'] = df['totalDeathsPerSession']/df['totalSessionsPlayed']
df['winRate'] = df['totalSessionsWon']/df['totalSessionsPlayed']
df['averageGoldEarned'] = df['totalGoldEarned']/df['totalSessionsPlayed']
df['averageAssists'].head()
df['winRate'].plot()
df['winRate'].describe()
df['winRate'].skew() # positive skewness -> skewed to the right (skewed to higher winrate)
df['winRate'].kurt() # kurtosis less than 3 -> platykurtic
sns.distplot(df['winRate'], bins=50)
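# Derived per-game statistics (KDA ratio, turrets killed, damage differential) used as
# regressors in the OLS models below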
df['kda'] = (df['averageAssists'] + df['averageKills'])/df['averageDeaths']
df['averageTurretsKilled'] = df['totalTurretsKilled']/df['totalSessionsPlayed']
df['averageDamageDifferential'] = (df['totalDamageDealt'] - df['totalDamageTaken'] + df['totalHeal'])/df['totalSessionsPlayed']
df.shape #200 players, 63 columns (7 added, 1 overwritten)
fig, ax = plt.subplots()
ax.scatter(df['winRate'], df['averageGoldEarned'])
ax.set_title('Average Gold Earned vs. Win Rate', loc='left', fontsize=14)
ax.set_xlabel('Win Rate')
ax.set_ylabel('Average Gold Earned')
fig, ax = plt.subplots()
ax.scatter(df['winRate'], df['kda'])
ax.set_title('KDA Ratio vs. Win Rate', loc='left', fontsize=14)
ax.set_xlabel('Win Rate')
ax.set_ylabel('KDA Ratio')
fig, ax = plt.subplots()
ax.scatter(df['winRate'], df['averageTurretsKilled'])
ax.set_title('Average Turrets Killed vs. Win Rate', loc='left', fontsize=14)
ax.set_xlabel('Win Rate')
ax.set_ylabel('Average Turrets Killed')
fig, ax = plt.subplots()
ax.scatter(df['winRate'], df['averageDamageDifferential'])
ax.set_title('Average Damage Differential vs. Win Rate', loc='left', fontsize=14)
ax.set_xlabel('Win Rate')
ax.set_ylabel('Average Damage Differential')
fig, ax = plt.subplots()
ax.scatter(df['winRate'], df['totalSessionsPlayed'])
ax.set_title('Games Played vs. Win Rate', loc='left', fontsize=14)
ax.set_xlabel('Win Rate')
ax.set_ylabel('Games Played')
model = 'winRate ~ averageGoldEarned + kda + averageTurretsKilled + averageDamageDifferential + totalSessionsPlayed'
results = smf.ols(model, data=df).fit()
results.summary()
model2 = 'winRate ~ kda + averageTurretsKilled + totalSessionsPlayed'
results2 = smf.ols(model2, data=df).fit()
results2.summary()
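# Hedged follow-up sketch (not part of the original analysis): a single-regressor model
# using only KDA, to see how much explanatory power the strongest predictor retains alone.
model3 = 'winRate ~ kda'
results3 = smf.ols(model3, data=df).fit()
results3.summary()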
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Challenger Data
Step2: The method below returns a dictionary of ranked statistics for a player based on the champion played. Using the key [None] returns aggregate ranked statistics of all champions played by the summoner. This aggregate data can be pulled in JSON format.
Step3: DataFrame
Step4: Below is a view of the dataframe for the top 5 players. Several of the fields are left as 0.0 as information for them is not recorded for ranked statistics.
Step5: Note
Step6: Data Manipulation
Step7: As an example, the average assists of the top 5 players from the newly included averageAssists series can be seen below.
Step8: Win Rate
Step9: From the descriptive statistics below, it is interesting to note that the win rates of all players in Challenger are above 50%. This makes sense as the ranked positions of all players are reset at the end of each season, and top players must climb the ranked ladder in order to get into Challenger. Thus, successfully winning at least as many games as losses seems like a necessary, but not sufficient condition to make it into Challenger.
Step10: Before moving on to the test model, the normality of the distribution of the win rates is evaluated. In a normal distribution, the skewness is 0 (neither skewed to the left nor the right) and the kurtosis is 3 ("tailedness" of the distribution). Since the win rate distribution has a positive skewness and a kurtosis less than 3, the distribution is skewed to the right and platykurtic (the peak looks flatter). A sample of 200 of the top players in a region is not expected to be normally distributed, and the skew towards a higher win rate as well as the platykurtic shape (fewer and less influential outliers) reflect this. Nevertheless, a multivariate linear regression will be performed on this dataset despite failing the normality condition.
Step11: Multivariate Linear Regression Models
Step12: Scatterplots
Step13: KDA Ratio vs. Win Rate
Step14: Average Turrets Killed vs. Win Rate
Step15: Average Damage Differential vs. Win Rate
Step16: Games Played vs. Win Rate
Step17: OLS Regression
Step18: Test Model 1 Analysis
|
6,842
|
<ASSISTANT_TASK:>
Python Code:
!pip install kaggle
import os
import glob
import zipfile
import functools
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['axes.grid'] = False
mpl.rcParams['figure.figsize'] = (12,12)
from sklearn.model_selection import train_test_split
import matplotlib.image as mpimg
import pandas as pd
from PIL import Image
import tensorflow as tf
import tensorflow.contrib as tfcontrib
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import models
from tensorflow.python.keras import backend as K
import os
# Upload the API token.
def get_kaggle_credentials():
token_dir = os.path.join(os.path.expanduser("~"),".kaggle")
token_file = os.path.join(token_dir, "kaggle.json")
if not os.path.isdir(token_dir):
os.mkdir(token_dir)
try:
with open(token_file,'r') as f:
pass
except IOError as no_file:
try:
from google.colab import files
except ImportError:
raise no_file
uploaded = files.upload()
if "kaggle.json" not in uploaded:
raise ValueError("You need an API key! see: "
"https://github.com/Kaggle/kaggle-api#api-credentials")
with open(token_file, "wb") as f:
f.write(uploaded["kaggle.json"])
os.chmod(token_file, 0o600)  # octal permissions: owner read/write only
get_kaggle_credentials()
import kaggle
competition_name = 'carvana-image-masking-challenge'
# Download data from Kaggle and unzip the files of interest.
def load_data_from_zip(competition, file):
with zipfile.ZipFile(os.path.join(competition, file), "r") as zip_ref:
unzipped_file = zip_ref.namelist()[0]
zip_ref.extractall(competition)
def get_data(competition):
kaggle.api.competition_download_files(competition, competition)
load_data_from_zip(competition, 'train.zip')
load_data_from_zip(competition, 'train_masks.zip')
load_data_from_zip(competition, 'train_masks.csv.zip')
get_data(competition_name)
img_dir = os.path.join(competition_name, "train")
label_dir = os.path.join(competition_name, "train_masks")
df_train = pd.read_csv(os.path.join(competition_name, 'train_masks.csv'))
ids_train = df_train['img'].map(lambda s: s.split('.')[0])
x_train_filenames = []
y_train_filenames = []
for img_id in ids_train:
x_train_filenames.append(os.path.join(img_dir, "{}.jpg".format(img_id)))
y_train_filenames.append(os.path.join(label_dir, "{}_mask.gif".format(img_id)))
x_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = \
train_test_split(x_train_filenames, y_train_filenames, test_size=0.2, random_state=42)
num_train_examples = len(x_train_filenames)
num_val_examples = len(x_val_filenames)
print("Number of training examples: {}".format(num_train_examples))
print("Number of validation examples: {}".format(num_val_examples))
x_train_filenames[:10]
y_train_filenames[:10]
display_num = 5
r_choices = np.random.choice(num_train_examples, display_num)
plt.figure(figsize=(10, 15))
for i in range(0, display_num * 2, 2):
img_num = r_choices[i // 2]
x_pathname = x_train_filenames[img_num]
y_pathname = y_train_filenames[img_num]
plt.subplot(display_num, 2, i + 1)
plt.imshow(mpimg.imread(x_pathname))
plt.title("Original Image")
example_labels = Image.open(y_pathname)
label_vals = np.unique(example_labels)
plt.subplot(display_num, 2, i + 2)
plt.imshow(example_labels)
plt.title("Masked Image")
plt.suptitle("Examples of Images and their Masks")
plt.show()
img_shape = (256, 256, 3)
batch_size = 3
epochs = 5
def _process_pathnames(fname, label_path):
# We map this function onto each pathname pair
img_str = tf.read_file(fname)
img = tf.image.decode_jpeg(img_str, channels=3)
label_img_str = tf.read_file(label_path)
# These are gif images so they return as (num_frames, h, w, c)
label_img = tf.image.decode_gif(label_img_str)[0]
# The label image should only have values of 1 or 0, indicating pixel wise
# object (car) or not (background). We take the first channel only.
label_img = label_img[:, :, 0]
label_img = tf.expand_dims(label_img, axis=-1)
return img, label_img
def shift_img(output_img, label_img, width_shift_range, height_shift_range):
  # This fn will perform the horizontal or vertical shift
if width_shift_range or height_shift_range:
if width_shift_range:
width_shift_range = tf.random_uniform([],
-width_shift_range * img_shape[1],
width_shift_range * img_shape[1])
if height_shift_range:
height_shift_range = tf.random_uniform([],
-height_shift_range * img_shape[0],
height_shift_range * img_shape[0])
# Translate both
output_img = tfcontrib.image.translate(output_img,
[width_shift_range, height_shift_range])
label_img = tfcontrib.image.translate(label_img,
[width_shift_range, height_shift_range])
return output_img, label_img
def flip_img(horizontal_flip, tr_img, label_img):
if horizontal_flip:
flip_prob = tf.random_uniform([], 0.0, 1.0)
tr_img, label_img = tf.cond(tf.less(flip_prob, 0.5),
lambda: (tf.image.flip_left_right(tr_img), tf.image.flip_left_right(label_img)),
lambda: (tr_img, label_img))
return tr_img, label_img
def _augment(img,
label_img,
resize=None, # Resize the image to some size e.g. [256, 256]
scale=1, # Scale image e.g. 1 / 255.
hue_delta=0, # Adjust the hue of an RGB image by random factor
horizontal_flip=False, # Random left right flip,
width_shift_range=0, # Randomly translate the image horizontally
height_shift_range=0): # Randomly translate the image vertically
if resize is not None:
# Resize both images
label_img = tf.image.resize_images(label_img, resize)
img = tf.image.resize_images(img, resize)
if hue_delta:
img = tf.image.random_hue(img, hue_delta)
img, label_img = flip_img(horizontal_flip, img, label_img)
img, label_img = shift_img(img, label_img, width_shift_range, height_shift_range)
label_img = tf.to_float(label_img) * scale
img = tf.to_float(img) * scale
return img, label_img
def get_baseline_dataset(filenames,
labels,
preproc_fn=functools.partial(_augment),
threads=5,
batch_size=batch_size,
shuffle=True):
num_x = len(filenames)
# Create a dataset from the filenames and labels
dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
# Map our preprocessing function to every element in our dataset, taking
# advantage of multithreading
dataset = dataset.map(_process_pathnames, num_parallel_calls=threads)
if preproc_fn.keywords is not None and 'resize' not in preproc_fn.keywords:
assert batch_size == 1, "Batching images must be of the same size"
dataset = dataset.map(preproc_fn, num_parallel_calls=threads)
if shuffle:
dataset = dataset.shuffle(num_x)
# It's necessary to repeat our data for all epochs
dataset = dataset.repeat().batch(batch_size)
return dataset
tr_cfg = {
'resize': [img_shape[0], img_shape[1]],
'scale': 1 / 255.,
'hue_delta': 0.1,
'horizontal_flip': True,
'width_shift_range': 0.1,
'height_shift_range': 0.1
}
tr_preprocessing_fn = functools.partial(_augment, **tr_cfg)
val_cfg = {
'resize': [img_shape[0], img_shape[1]],
'scale': 1 / 255.,
}
val_preprocessing_fn = functools.partial(_augment, **val_cfg)
train_ds = get_baseline_dataset(x_train_filenames,
y_train_filenames,
preproc_fn=tr_preprocessing_fn,
batch_size=batch_size)
val_ds = get_baseline_dataset(x_val_filenames,
y_val_filenames,
preproc_fn=val_preprocessing_fn,
batch_size=batch_size)
temp_ds = get_baseline_dataset(x_train_filenames,
y_train_filenames,
preproc_fn=tr_preprocessing_fn,
batch_size=1,
shuffle=False)
# Let's examine some of these augmented images
data_aug_iter = temp_ds.make_one_shot_iterator()
next_element = data_aug_iter.get_next()
with tf.Session() as sess:
batch_of_imgs, label = sess.run(next_element)
# Running next element in our graph will produce a batch of images
plt.figure(figsize=(10, 10))
img = batch_of_imgs[0]
plt.subplot(1, 2, 1)
plt.imshow(img)
plt.subplot(1, 2, 2)
plt.imshow(label[0, :, :, 0])
plt.show()
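# U-Net building blocks: conv_block applies two Conv2D + BatchNorm + ReLU layers;
# encoder_block adds 2x2 max-pooling for downsampling; decoder_block upsamples with a
# transposed convolution and concatenates the matching encoder output (skip connection).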
def conv_block(input_tensor, num_filters):
encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)
encoder = layers.BatchNormalization()(encoder)
encoder = layers.Activation('relu')(encoder)
encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)
encoder = layers.BatchNormalization()(encoder)
encoder = layers.Activation('relu')(encoder)
return encoder
def encoder_block(input_tensor, num_filters):
encoder = conv_block(input_tensor, num_filters)
encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder)
return encoder_pool, encoder
def decoder_block(input_tensor, concat_tensor, num_filters):
decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)
decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
decoder = layers.BatchNormalization()(decoder)
decoder = layers.Activation('relu')(decoder)
decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
decoder = layers.BatchNormalization()(decoder)
decoder = layers.Activation('relu')(decoder)
decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
decoder = layers.BatchNormalization()(decoder)
decoder = layers.Activation('relu')(decoder)
return decoder
inputs = layers.Input(shape=img_shape)
# 256
encoder0_pool, encoder0 = encoder_block(inputs, 32)
# 128
encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64)
# 64
encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128)
# 32
encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256)
# 16
encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512)
# 8
center = conv_block(encoder4_pool, 1024)
# center
decoder4 = decoder_block(center, encoder4, 512)
# 16
decoder3 = decoder_block(decoder4, encoder3, 256)
# 32
decoder2 = decoder_block(decoder3, encoder2, 128)
# 64
decoder1 = decoder_block(decoder2, encoder1, 64)
# 128
decoder0 = decoder_block(decoder1, encoder0, 32)
# 256
outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)
model = models.Model(inputs=[inputs], outputs=[outputs])
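# Dice coefficient measures mask overlap; dice_loss = 1 - dice, and bce_dice_loss adds
# binary cross entropy, a combination reported to work well for this competition.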
def dice_coeff(y_true, y_pred):
smooth = 1.
# Flatten
y_true_f = tf.reshape(y_true, [-1])
y_pred_f = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
def dice_loss(y_true, y_pred):
loss = 1 - dice_coeff(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
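# A hedged variant (not from the original tutorial): combine BCE with the negative log of
# the dice coefficient, one of the alternative losses worth experimenting with. Sketch only.
def bce_log_dice_loss(y_true, y_pred):
    return losses.binary_crossentropy(y_true, y_pred) - tf.math.log(dice_coeff(y_true, y_pred))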
model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_loss])
model.summary()
save_model_path = '/tmp/weights.hdf5'
cp = tf.keras.callbacks.ModelCheckpoint(filepath=save_model_path, monitor='val_dice_loss', save_best_only=True, verbose=1)
history = model.fit(train_ds,
steps_per_epoch=int(np.ceil(num_train_examples / float(batch_size))),
epochs=epochs,
validation_data=val_ds,
validation_steps=int(np.ceil(num_val_examples / float(batch_size))),
callbacks=[cp])
dice = history.history['dice_loss']
val_dice = history.history['val_dice_loss']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(16, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, dice, label='Training Dice Loss')
plt.plot(epochs_range, val_dice, label='Validation Dice Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Dice Loss')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# Alternatively, load the weights directly: model.load_weights(save_model_path)
model = models.load_model(save_model_path, custom_objects={'bce_dice_loss': bce_dice_loss,
'dice_loss': dice_loss})
# Let's visualize some of the outputs
data_aug_iter = val_ds.make_one_shot_iterator()
next_element = data_aug_iter.get_next()
# Running next element in our graph will produce a batch of images
plt.figure(figsize=(10, 20))
for i in range(5):
batch_of_imgs, label = tf.keras.backend.get_session().run(next_element)
img = batch_of_imgs[0]
predicted_label = model.predict(batch_of_imgs)[0]
plt.subplot(5, 3, 3 * i + 1)
plt.imshow(img)
plt.title("Input image")
plt.subplot(5, 3, 3 * i + 2)
plt.imshow(label[0, :, :, 0])
plt.title("Actual Mask")
plt.subplot(5, 3, 3 * i + 3)
plt.imshow(predicted_label[:, :, 0])
plt.title("Predicted Mask")
plt.suptitle("Examples of Input Image, Label, and Prediction")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get all the files
Step2: Only import kaggle after adding the credentials.
Step3: We'll download the data from Kaggle
Step4: You must accept the competition rules before downloading the data.
Step5: Here's what the paths look like
Step6: Visualize
Step7: Set up
Step8: Using these exact same parameters may be too computationally intensive for your hardware, so tweak the parameters accordingly. Also, it is important to note that due to the architecture of our UNet version, the image size must be evenly divisible by 32, as we downsample the spatial resolution by a factor of 2 with each MaxPooling2D layer.
Step10: Shifting the image
Step11: Flipping the image randomly
Step12: Assembling our transformations into our augment function
Step13: Set up train and validation datasets
Step14: Let's see if our image augmentor data pipeline is producing expected results
Step15: Build the model
Step16: Define your model
Step17: Defining custom metrics and loss functions
Step18: Here, we'll use a specialized loss function that combines binary cross entropy and our dice loss. This choice is based on competitors in this challenge empirically obtaining better results with it. Try out your own custom losses and measure their performance (e.g. bce + log(dice_loss), only bce, etc.)!
Step19: Compile your model
Step20: Train your model
Step21: Don't forget to specify our model callback in the fit function call.
Step22: Visualize training process
Step23: Even with only 5 epochs, we see strong performance.
|
6,843
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import sklearn
import scipy.stats as stats
import scipy.optimize
import matplotlib.pyplot as plt
import seaborn as sns
import time
import numpy as np
import os
import pandas as pd
# We install various packages for approximate Bayesian inference
# To avoid installing packages the internet every time you open a colab,
# you can use this trick:
# https://stackoverflow.com/questions/55253498/how-do-i-install-a-library-permanently-in-colab
# The PyMC3 package (https://docs.pymc.io) supports HMC and variational inference
# https://docs.pymc.io/notebooks/api_quickstart.html
!pip install pymc3==3.8
import pymc3 as pm
pm.__version__
# The arviz package (https://github.com/arviz-devs/arviz) can be used to make various plots
# of posterior samples generated by any algorithm.
!pip install arviz
import arviz as az
# https://github.com/probml/pyprobml/blob/master/scripts/linreg_poly_vs_degree.py
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler
import sklearn.metrics
from sklearn.metrics import mean_squared_error as mse
def make_1dregression_data(n=21):
np.random.seed(0)
xtrain = np.linspace(0.0, 20, n)
xtest = np.arange(0.0, 20, 0.1)
sigma2 = 4
w = np.array([-1.5, 1 / 9.0])
fun = lambda x: w[0] * x + w[1] * np.square(x)
ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * np.sqrt(sigma2)
ytest = fun(xtest) + np.random.normal(0, 1, xtest.shape) * np.sqrt(sigma2)
return xtrain, ytrain, xtest, ytest
xtrain, ytrain, xtest, ytest = make_1dregression_data(n=21)
# Rescaling data
scaler = MinMaxScaler(feature_range=(-1, 1))
Xtrain = scaler.fit_transform(xtrain.reshape(-1, 1))
Xtest = scaler.transform(xtest.reshape(-1, 1))
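# Fit polynomial regressions of increasing degree and record train/test MSE together
# with the fitted curves for later plotting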
degs = np.arange(1, 21, 1)
ndegs = np.max(degs)
mse_train = np.empty(ndegs)
mse_test = np.empty(ndegs)
ytest_pred_stored = np.empty(ndegs, dtype=np.ndarray)
ytrain_pred_stored = np.empty(ndegs, dtype=np.ndarray)
for deg in degs:
model = LinearRegression()
poly_features = PolynomialFeatures(degree=deg, include_bias=False)
Xtrain_poly = poly_features.fit_transform(Xtrain)
model.fit(Xtrain_poly, ytrain)
ytrain_pred = model.predict(Xtrain_poly)
ytrain_pred_stored[deg - 1] = ytrain_pred
Xtest_poly = poly_features.transform(Xtest)
ytest_pred = model.predict(Xtest_poly)
mse_train[deg - 1] = mse(ytrain_pred, ytrain)
mse_test[deg - 1] = mse(ytest_pred, ytest)
ytest_pred_stored[deg - 1] = ytest_pred
# Plot MSE vs degree
fig, ax = plt.subplots()
mask = degs <= 15
ax.plot(degs[mask], mse_test[mask], color="r", marker="x", label="test")
ax.plot(degs[mask], mse_train[mask], color="b", marker="s", label="train")
ax.legend(loc="upper right", shadow=True)
plt.xlabel("degree")
plt.ylabel("mse")
# save_fig('polyfitVsDegree.pdf')
plt.show()
# Plot fitted functions
chosen_degs = [1, 2, 14, 20]
fig, axs = plt.subplots(2, 2, figsize=(10, 5))
axs = axs.reshape(-1)
for i, deg in enumerate(chosen_degs):
axs[i].scatter(xtrain, ytrain)
axs[i].plot(xtest, ytest_pred_stored[deg - 1])
axs[i].set_ylim((-10, 15))
axs[i].set_title("degree {}".format(deg))
fig.tight_layout(h_pad=5, w_pad=5)
plt.show()
# Plot residuals
# https://blog.minitab.com/blog/adventures-in-statistics-2/why-you-need-to-check-your-residual-plots-for-regression-analysis
chosen_degs = [1, 2, 14, 20]
fig, axs = plt.subplots(2, 2, figsize=(10, 5))
axs = axs.reshape(-1)
for i, deg in enumerate(chosen_degs):
ypred = ytrain_pred_stored[deg - 1]
residuals = ytrain - ypred
axs[i].plot(ypred, residuals, "o")
axs[i].set_xlabel("predicted y")
axs[i].set_ylabel("residual")
axs[i].set_title("degree {}".format(deg))
fig.tight_layout(h_pad=5, w_pad=5)
plt.show()
chosen_degs = [1, 2, 14, 20]
for train in [True, False]:
fig, axs = plt.subplots(2, 2, figsize=(10, 5))
axs = axs.reshape(-1)
for i, deg in enumerate(chosen_degs):
if train:
ytrue = ytrain
ypred = ytrain_pred_stored[deg - 1]
dataset = "Train"
else:
ytrue = ytest
ypred = ytest_pred_stored[deg - 1]
dataset = "Test"
axs[i].scatter(ytrue, ypred)
# Plot diagonal line
axs[i].plot(axs[i].get_xlim(), axs[i].get_ylim(), ls="--", c=".3")
axs[i].set_xlabel("true y")
axs[i].set_ylabel("predicted y")
r2 = sklearn.metrics.r2_score(ytrue, ypred)
axs[i].set_title("degree {}. R2 on {} = {:0.3f}".format(deg, dataset, r2))
fig.tight_layout(h_pad=5, w_pad=5)
plt.show()
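# Bayesian linear regression on synthetic data: y = alpha + beta * x + Gaussian noise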
np.random.seed(1)
N = 100
alpha_real = 2.5
beta_real = 0.9
noiseSD = 0.5
eps_real = np.random.normal(0, noiseSD, size=N)
x = np.random.normal(10, 1, N) # centered on 10
y_real = alpha_real + beta_real * x
y = y_real + eps_real
# save untransformed data for later
x_orig = x
y_orig = y
_, ax = plt.subplots(1, 2, figsize=(8, 4))
ax[0].plot(x, y, "C0.")
ax[0].set_xlabel("x")
ax[0].set_ylabel("y", rotation=0)
ax[0].plot(x, y_real, "k")
az.plot_kde(y, ax=ax[1])
ax[1].set_xlabel("y")
plt.tight_layout()
# Fit posterior with MCMC instead of analytically (for simplicity and flexibility)
# This is the same as BAP code, except we fix the noise variance to a constant.
with pm.Model() as model_g:
α = pm.Normal("α", mu=0, sd=10)
β = pm.Normal("β", mu=0, sd=1)
ϵ = pm.HalfCauchy("ϵ", 5)
μ = pm.Deterministic("μ", α + β * x)
y_pred = pm.Normal("y_pred", mu=μ, sd=ϵ, observed=y)
# y_pred = pm.Normal('y_pred', mu=μ, sd=noiseSD, observed=y)
trace_g = pm.sample(1000)
az.plot_trace(trace_g, var_names=["α", "β"])
az.plot_pair(trace_g, var_names=["α", "β"], plot_kwargs={"alpha": 0.1});
# To reduce the correlation between alpha and beta, we can center the data
x_centered = x_orig - x_orig.mean()
# or standardize the data
# x = (x - x.mean())/x.std()
# y = (y - y.mean())/y.std()
with pm.Model() as model_g_centered:
α = pm.Normal("α", mu=0, sd=10)
β = pm.Normal("β", mu=0, sd=1)
ϵ = pm.HalfCauchy("ϵ", 5)
μ = pm.Deterministic("μ", α + β * x_centered)
y_pred = pm.Normal("y_pred", mu=μ, sd=ϵ, observed=y)
trace_g_centered = pm.sample(1000)
az.plot_pair(trace_g_centered, var_names=["α", "β"], plot_kwargs={"alpha": 0.1});
# Samples from posterior predictive
plt.plot(x, y, "C0.")
alpha_m = trace_g["α"].mean()
beta_m = trace_g["β"].mean()
np.random.seed(0)
nsamples = len(trace_g["α"])
print(nsamples)
draws = range(0, nsamples, 200)
plt.plot(x, trace_g["α"][draws] + trace_g["β"][draws] * x[:, np.newaxis], c="gray", alpha=0.5)
plt.plot(x, alpha_m + beta_m * x, c="k", label=f"y = {alpha_m:.2f} + {beta_m:.2f} * x")
plt.xlabel("x")
plt.ylabel("y", rotation=0)
plt.legend()
# HPD for posterior predictive
plt.plot(x, alpha_m + beta_m * x, c="k", label=f"y = {alpha_m:.2f} + {beta_m:.2f} * x")
sig = az.plot_hpd(x, trace_g["μ"], credible_interval=0.98, color="k")
plt.xlabel("x")
plt.ylabel("y", rotation=0)
plt.legend()
# arviz has nicer plotting
ppc = pm.sample_posterior_predictive(trace_g, samples=2000, model=model_g)
plt.plot(x, alpha_m + beta_m * x, c="k", label=f"y = {alpha_m:.2f} + {beta_m:.2f} * x")
az.plot_hpd(x, ppc["y_pred"], credible_interval=0.5, color="gray") # central 50% HPD
az.plot_hpd(x, ppc["y_pred"], color="gray") # 94% HPD
plt.xlabel("x")
plt.ylabel("y", rotation=0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Linear regression (MLE)
Step2: Bayesian linear regression
Step3: Compute posterior
Step4: Compute posterior predictive
|
6,844
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from parcels import FieldSet, ParticleSet, JITParticle, AdvectionRK4, ParticleFile, plotTrajectoriesFile
import numpy as np
from datetime import timedelta as delta
from os import path
filenames = path.join('SWASH_data', 'field_*.nc')
variables = {'U': 'cross-shore velocity',
'V': 'along-shore velocity',
'depth_u': 'time varying depth_u'}
dimensions = {'U': {'lon': 'x', 'lat': 'y', 'depth': 'not_yet_set', 'time': 't'},
'V': {'lon': 'x', 'lat': 'y', 'depth': 'not_yet_set', 'time': 't'},
'depth_u': {'lon': 'x', 'lat': 'y', 'depth': 'not_yet_set', 'time': 't'}}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions, mesh='flat', allow_time_extrapolation=True)
fieldset.U.set_depth_from_field(fieldset.depth_u)
fieldset.V.set_depth_from_field(fieldset.depth_u)
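# Release a single particle at depth -0.1 and advect it with 4th-order Runge-Kutta,
# writing its position every 0.05 seconds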
pset = ParticleSet(fieldset, JITParticle, lon=9.5, lat=12.5, depth=-0.1)
pfile = pset.ParticleFile("SwashParticles", outputdt=delta(seconds=0.05))
pset.execute(AdvectionRK4, dt=delta(seconds=0.005), output_file=pfile)
pfile.export() # export the trajectory data to a netcdf file
plotTrajectoriesFile('SwashParticles.nc');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here, we use sample data from the SWASH model. We first set the filenames and variables
Step2: Now, the first key step when reading time-evolving depth dimensions is that we specify depth as 'not_yet_set' in the dimensions dictionary
Step3: Then, after we create the FieldSet object, we set the depth dimension of the relevant Fields to fieldset.depth_u and fieldset.depth_w, using the Field.set_depth_from_field() method
Step4: Now, we can create a ParticleSet, run those and plot them
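As a small follow-up sketch (an addition, not part of the original tutorial), the exported file can also be inspected directly, assuming xarray is installed and that the trajectory variables are named lon/lat/z/time as parcels writes them:
import xarray as xr
ds = xr.open_dataset("SwashParticles.nc")
print(ds)                     # trajectory/obs dimensions with lon, lat, z, time variables
print(float(ds["z"].min()))   # e.g. deepest point reached by the particle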
|
6,845
|
<ASSISTANT_TASK:>
Python Code:
#@title Copyright 2020 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import tensorflow_hub as hub
from io import BytesIO
from PIL import Image as PilImage
import numpy as np
from matplotlib import pyplot as plt
from six.moves.urllib.request import urlopen
def read_image(filename):
fd = None
if(filename.startswith('http')):
fd = urlopen(filename)
else:
fd = tf.io.gfile.GFile(filename, 'rb')
pil_image = PilImage.open(fd)
width, height = pil_image.size
# crop to make the image square
pil_image = pil_image.crop((0, 0, height, height))
pil_image = pil_image.resize((257,257),PilImage.ANTIALIAS)
image_unscaled = np.array(pil_image)
image_np = np.expand_dims(
image_unscaled.astype(np.float32) / 255., axis=0)
return image_np
def visualize_output_comparison(img_original, img_masked, img_filled):
plt.figure(figsize=(24,12))
plt.subplot(131)
plt.imshow((np.squeeze(img_original)))
plt.title("Original", fontsize=24)
plt.axis('off')
plt.subplot(132)
plt.imshow((np.squeeze(img_masked)))
plt.title("Masked", fontsize=24)
plt.axis('off')
plt.subplot(133)
plt.imshow((np.squeeze(img_filled)))
plt.title("Generated", fontsize=24)
plt.axis('off')
plt.show()
wikimedia = "https://upload.wikimedia.org/wikipedia/commons/thumb/3/31/Nusfjord_road%2C_2010_09.jpg/800px-Nusfjord_road%2C_2010_09.jpg"
# wikimedia = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/Beech_forest_M%C3%A1tra_in_winter.jpg/640px-Beech_forest_M%C3%A1tra_in_winter.jpg"
# wikimedia = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/b2/Marmolada_Sunset.jpg/640px-Marmolada_Sunset.jpg"
# wikimedia = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Aegina_sunset.jpg/640px-Aegina_sunset.jpg"
input_img = read_image(wikimedia)
#@title Model Selection { display-mode: "form" }
model_name = 'Boundless Quarter' # @param ['Boundless Half', 'Boundless Quarter', 'Boundless Three Quarters']
model_handle_map = {
'Boundless Half' : 'https://tfhub.dev/google/boundless/half/1',
'Boundless Quarter' : 'https://tfhub.dev/google/boundless/quarter/1',
'Boundless Three Quarters' : 'https://tfhub.dev/google/boundless/three_quarter/1'
}
model_handle = model_handle_map[model_name]
print("Loading model {} ({})".format(model_name, model_handle))
model = hub.load(model_handle)
result = model.signatures['default'](tf.constant(input_img))
generated_image = result['default']
masked_image = result['masked_image']
visualize_output_comparison(input_img, masked_image, generated_image)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <table class="tfo-notebook-buttons" align="left">
Step2: Reading image for input
Step3: Visualization method
Step4: Loading an Image
Step5: Selecting a model from TensorFlow Hub
Step6: Now that we've chosen the model we want, let's load it from TensorFlow Hub.
Step7: Doing Inference
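One optional extra step (illustrative only; the output filename is arbitrary) is to convert the generated tensor back to an 8-bit image and save it with PIL, assuming generated_image is the [1, H, W, 3] float tensor in [0, 1] returned above:
out = np.squeeze(generated_image.numpy())
out = (np.clip(out, 0.0, 1.0) * 255).astype(np.uint8)
PilImage.fromarray(out).save("boundless_generated.png")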
|
6,846
|
<ASSISTANT_TASK:>
Python Code:
from problem_formulation import joint
joint
sobol_samples = joint.sample(10000, rule="sobol")
antithetic_samples = joint.sample(10000, antithetic=True, seed=1234)
halton_samples = joint.sample(10000, rule="halton")
from matplotlib import pyplot
pyplot.rc("figure", figsize=[16, 4])
pyplot.subplot(131)
pyplot.scatter(*sobol_samples[:, :1000])
pyplot.title("sobol")
pyplot.subplot(132)
pyplot.scatter(*antithetic_samples[:, :1000])
pyplot.title("antithetic variates")
pyplot.subplot(133)
pyplot.scatter(*halton_samples[:, :1000])
pyplot.title("halton")
pyplot.show()
from problem_formulation import model_solver, coordinates
import numpy
sobol_evals = numpy.array([
model_solver(sample) for sample in sobol_samples.T])
antithetic_evals = numpy.array([
model_solver(sample) for sample in antithetic_samples.T])
halton_evals = numpy.array([
model_solver(sample) for sample in halton_samples.T])
pyplot.subplot(131)
pyplot.plot(coordinates, sobol_evals[:100].T, alpha=0.3)
pyplot.title("sobol")
pyplot.subplot(132)
pyplot.plot(coordinates, antithetic_evals[:100].T, alpha=0.3)
pyplot.title("antithetic variate")
pyplot.subplot(133)
pyplot.plot(coordinates, halton_evals[:100].T, alpha=0.3)
pyplot.title("halton")
pyplot.show()
from problem_formulation import error_in_mean, indices, eps_mean
eps_sobol_mean = [error_in_mean(
numpy.mean(sobol_evals[:idx], 0)) for idx in indices]
eps_antithetic_mean = [error_in_mean(
numpy.mean(antithetic_evals[:idx], 0)) for idx in indices]
eps_halton_mean = [error_in_mean(
numpy.mean(halton_evals[:idx], 0)) for idx in indices]
pyplot.rc("figure", figsize=[6, 4])
pyplot.semilogy(indices, eps_mean, "r", label="random")
pyplot.semilogy(indices, eps_sobol_mean, "-", label="sobol")
pyplot.semilogy(indices, eps_antithetic_mean, ":", label="antithetic")
pyplot.semilogy(indices, eps_halton_mean, "--", label="halton")
pyplot.legend()
pyplot.show()
from problem_formulation import error_in_variance, eps_variance
eps_halton_variance = [error_in_variance(
numpy.var(halton_evals[:idx], 0)) for idx in indices]
eps_sobol_variance = [error_in_variance(
numpy.var(sobol_evals[:idx], 0)) for idx in indices]
eps_antithetic_variance = [error_in_variance(
numpy.var(antithetic_evals[:idx], 0)) for idx in indices]
pyplot.semilogy(indices, eps_variance, "r", label="random")
pyplot.semilogy(indices, eps_sobol_variance, "-", label="sobol")
pyplot.semilogy(indices, eps_antithetic_variance, ":", label="antithetic")
pyplot.semilogy(indices, eps_halton_variance, "--", label="halton")
pyplot.legend()
pyplot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then we generate samples from the three schemes
Step2: From the three plots above it is easy to see both how the Sobol sequence has
Step3: Error analysis
Step4: Here we see that for our little problem, all new schemes outperform
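One way to make that comparison quantitative (a sketch, assuming indices and the error lists computed above are positive and of equal length) is to fit the slope of each error curve on a log-log scale:
rate_halton = numpy.polyfit(numpy.log(indices), numpy.log(eps_halton_mean), 1)[0]
rate_random = numpy.polyfit(numpy.log(indices), numpy.log(eps_mean), 1)[0]
print(f"mean-error convergence ~ N^{rate_halton:.2f} (Halton) vs N^{rate_random:.2f} (random)")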
|
6,847
|
<ASSISTANT_TASK:>
Python Code:
notas = {'bia': 10, 'pedro': 0, 'ana': 7}
notas
notas.keys()
notas.pop('bia')
notas
dir(notas)
notas
notas.__getitem__('ana')
notas['ana']
notas.__getitem__('joselito')
notas['joselito']
notas.__class__
notas.__doc__
print(notas.__doc__)
3 + 4
print(3 .__doc__)
3 .__add__(4)
3 .__sub__(4)
help(3)
def soma(a, b):
"""retorna a + b"""
soma = a + b
return soma
soma(1, 2)
soma
adição = soma
adição
adição.__name__
adição.__doc__
import dis
dis.dis(soma)
"1" + 10
"1" + str(10)
int("1") + 10
def dobra(x):
return x * 2
dobra(2)
dobra(1.15)
dobra('bo')
dobra([1, 2, 3])
dobra((4, 5, 6))
dobra(None)
type(1)
type([1, 2, 3])
type((1, 2, 3))
type({})
type('lalala')
type(False)
a = 10
a
id(a)
b = 3
b
a += b
a
id(a)
lista = [1, 2, 3, 4]
lista
id(lista)
lista.append(10)
lista.remove(2)
lista += [-4, -3]
lista
id(lista)
s = 'abcd'
id(s)
s[0] = 'z'
s = 'z' + s[1:]
s
id(s)
a = [1, 2, 3]
a
b = a
a.append(4)
b
c = 1 / 0
c
josé = {'nome': 'José Silva', 'idade': 10}
zé = josé
zé is josé
id(zé), id(josé)
zé['ano_nascimento'] = 2006
josé
joão = {'nome': 'José Silva', 'idade': 10, 'ano_nascimento': 2006}
joão == josé
joão is josé
a = 10
a is None
b = None
b is None
class Cão:
qtd_patas = 4
carnívoro = True
nervoso = False
def __init__(self, nome):
self.nome = nome
rex = Cão('Rex')
type(rex)
rex.qtd_patas
rex.carnívoro
rex.nervoso
rex.nome
rex.nervoso = True
rex.nervoso
Cão.nervoso
rex.sujo = True
rex.sujo
rex.idade = 5
rex.idade
Cão.sujo
Cão.idade
Cão.__name__
Cão.qtd_patas
Cão.nervoso
Cão.carnívoro
Cão.nome
fido = Cão('Fido')
fido.nome
class Cão:
qtd_patas = 4
carnívoro = True
nervoso = False
def __init__(self, nome):
self.nome = nome
def latir(self, vezes=1):
"""Latir do cão. Quanto mais nervoso mais late."""
vezes += self.nervoso * vezes
latido = 'Au! ' * vezes
print('{}: {}'.format(self.nome, latido))
rex = Cão('Rex')
rex.latir()
rex.nervoso = True
rex.latir()
rex.latir(10)
class Cão:
qtd_patas = 4
carnívoro = True
nervoso = False
def __init__(self, nome, truques=None):
self.nome = nome
if not truques:
self.truques = []
else:
self.truques = list(truques)
def latir(self, vezes=1):
"""Latir do cão. Quanto mais nervoso mais late."""
vezes += self.nervoso * vezes
latido = 'Au! ' * vezes
print('{}: {}'.format(self.nome, latido))
def ensina_truque(self, truque):
if truque not in self.truques:
self.truques.append(truque)
fido = Cão('Fido', truques=['Pegar'])
fido.truques
fido.ensina_truque('Rolar')
fido.truques
fido.ensina_truque('Pegar')
fido.truques
class ExemploInstancia:
def metodo_instancia(self):
print('Recebi {}'.format(self))
ExemploInstancia.metodo_instancia()
inst = ExemploInstancia()
inst.metodo_instancia()
class ExemploClasse:
@classmethod
def metodo_classe(cls):
print("Recebi {}".format(cls))
ExemploClasse.metodo_classe()
inst = ExemploClasse()
inst.metodo_classe()
class Exemplo:
@staticmethod
def metodo_estático():
print('Sou estático e não recebo nada')
Exemplo.metodo_estático()
inst = Exemplo()
inst.metodo_estático()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dictionaries have several methods that we use to modify the objects
Step2: We can use the dir() function to inspect the methods and attributes of the dict notas
Step3: Here we see several methods whose names have underscores at the beginning and end, such as __len__, __getitem__, __setitem__. These are called special methods and are part of Python's data model. They are invoked by the interpreter when a special syntax is triggered. For example, when we access dictionary items by their key, the interpreter calls dict.__getitem__()
Step4: The dict also has special data attributes such as __class__, which stores the object's class, and __doc__, which holds the object's docstring
Step5: To see the docstring formatted for output, use the print() function
Step6: Numbers are objects
Step7: They have methods and attributes
Step8: Just remember that special methods should not be called directly; the previous examples only illustrate how these methods work and that they exist. If you want to consult an object's documentation, use the help() function
Step10: As explained in [py-intro] Aula 05, functions are also objects. In the terminology used by textbooks this means that, in Python, functions are first-class objects or first-class citizens.
Step11: We can assign functions to variables
Step12: Access attributes
Step13: We can see the bytecode the function executes using the dis (disassembly) module, passing the soma() function as an argument to dis.dis()
Step14: Object typing
Step15: We tried to concatenate the number 10 to the string "1", but a TypeError exception was raised saying that an int object could not be converted to str implicitly.
Step16: Dynamic typing
Step17: We can double an int
Step18: Double a float
Step19: strings
Step20: sequences
Step21: Types that do not support multiplication by integers will raise an exception when executed
Step22: The type() function lets us check the types of objects
Step23: Mutability
Step24: Every Python object has an identity, a unique number that distinguishes that object. We can access an object's identity using the id() function
Step25: This means that the identity of object a is 10894368.
Step26: The identity changed, which means the variable a now references another object that was created when we executed a += b.
Step27: Let's check the identity of this list
Step28: Even when we modify the list by inserting and removing values, its identity stays the same.
Step29: As we saw in lesson two of the introductory module, strings are immutable, and to change their value we need to use slicing
Step30: Comparing the identity of s before and after the change, we see that they are different objects.
Step31: The variables a and b store references to the same list rather than copies.
Step32: Since the number could not be created - because it represents an operation that is invalid (division by zero) for the language - the variable c was not bound to any object
Step33: Since variables are just labels, the correct way to talk about assignment is "the variable x was assigned to the lamp (instance)" and not "the lamp was assigned to the variable x". It is as if we stuck a "post-it" x on an object, rather than storing that object in a box x.
Step34: Let's suppose there is an impostor - João - who has the same credentials as José Silva. Their credentials are the same, but João is not José
Step35: The values of their data (or credentials) are equal, but they are not the same object
Step36: In this example we saw aliasing. josé and zé are aliases
Step37: Classes
Step38: On the first line we define a class named Cão.
Step39: Let's check its attributes
Step40: We can also change these attributes
Step41: We changed only the nervoso attribute of the rex instance. The value of Cão.nervoso remains the same
Step42: We can also create attributes dynamically for our rex instance
Step43: Remember once again that these changes happen only on the instance and not on the class
Step44: Classes are also objects and we can access their attributes
Step45: We cannot access nome, because nome is an attribute associated only with instances of the class.
Step47: Class attributes are used to provide default values for data shared by all "dogs", such as the number of paws.
Step49: Let's play a bit more with Cão and implement even more methods
Step50: Instance, class and static methods
Step51: We cannot call the instance method with the class alone
Step52: We need to create an instance to use it
Step53: Class methods, on the other hand, are methods that refer to the class as a whole and receive - not an instance but - the class object.
Step54: We can call the method using the class object ExemploClasse
Step55: We can also call the method from an instance of that class. Because it is a classmethod, the method will still receive the class object as an argument, not the instance
Step56: Finally, we also have static methods, which work like plain functions attached to objects or classes. They do not receive any arguments automatically
Step57: We can also call the static method from an instance
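A short extra sketch tying the special-method discussion back to user-defined classes (the Boletim class below is made up for illustration): implementing __getitem__ is what makes the bracket syntax work for our own objects too.
class Boletim:
    def __init__(self, notas):
        self._notas = dict(notas)
    def __getitem__(self, aluno):
        # called by the interpreter when we write boletim[aluno]
        return self._notas[aluno]
boletim = Boletim({'ana': 7, 'pedro': 0})
boletim['ana']   # equivalent to boletim.__getitem__('ana')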
|
6,848
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'hammoz-consortium', 'sandbox-1', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
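# Illustrative shape of the call only (hypothetical name/email, not the
# actual document authors):
# DOC.set_author("Jane Doe", "jane.doe@example.org")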
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
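# Illustrative only - hypothetical selections from the list above, not the
# documented HAMMOZ configuration:
# DOC.set_value("water")
# DOC.set_value("energy")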
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
6,849
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alex Rockhill <aprockhill@mailbox.org>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import nilearn.plotting
from dipy.align import resample
import mne
from mne.datasets import fetch_fsaverage
# paths to mne datasets: sample sEEG and FreeSurfer's fsaverage subject,
# which is in MNI space
misc_path = mne.datasets.misc.data_path()
sample_path = mne.datasets.sample.data_path()
subjects_dir = op.join(sample_path, 'subjects')
# use mne-python's fsaverage data
fetch_fsaverage(subjects_dir=subjects_dir, verbose=True) # downloads if needed
# GUI requires pyvista backend
mne.viz.set_3d_backend('pyvistaqt')
T1 = nib.load(op.join(misc_path, 'seeg', 'sample_seeg', 'mri', 'T1.mgz'))
viewer = T1.orthoview()
viewer.set_position(0, 9.9, 5.8)
viewer.figs[0].axes[0].annotate(
'PC', (107, 108), xytext=(10, 75), color='white',
horizontalalignment='center',
arrowprops=dict(facecolor='white', lw=0.5, width=2, headwidth=5))
viewer.figs[0].axes[0].annotate(
'AC', (137, 108), xytext=(246, 75), color='white',
horizontalalignment='center',
arrowprops=dict(facecolor='white', lw=0.5, width=2, headwidth=5))
def plot_overlay(image, compare, title, thresh=None):
"""Define a helper function for comparing plots."""
image = nib.orientations.apply_orientation(
np.asarray(image.dataobj), nib.orientations.axcodes2ornt(
nib.orientations.aff2axcodes(image.affine))).astype(np.float32)
compare = nib.orientations.apply_orientation(
np.asarray(compare.dataobj), nib.orientations.axcodes2ornt(
nib.orientations.aff2axcodes(compare.affine))).astype(np.float32)
if thresh is not None:
compare[compare < np.quantile(compare, thresh)] = np.nan
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
fig.suptitle(title)
for i, ax in enumerate(axes):
ax.imshow(np.take(image, [image.shape[i] // 2], axis=i).squeeze().T,
cmap='gray')
ax.imshow(np.take(compare, [compare.shape[i] // 2],
axis=i).squeeze().T, cmap='gist_heat', alpha=0.5)
ax.invert_yaxis()
ax.axis('off')
fig.tight_layout()
CT_orig = nib.load(op.join(misc_path, 'seeg', 'sample_seeg_CT.mgz'))
# resample to T1's definition of world coordinates
CT_resampled = resample(moving=np.asarray(CT_orig.dataobj),
static=np.asarray(T1.dataobj),
moving_affine=CT_orig.affine,
static_affine=T1.affine)
plot_overlay(T1, CT_resampled, 'Unaligned CT Overlaid on T1', thresh=0.95)
del CT_resampled
reg_affine = np.array([
[0.99270756, -0.03243313, 0.11610254, -133.094156],
[0.04374389, 0.99439665, -0.09623816, -97.58320673],
[-0.11233068, 0.10061512, 0.98856381, -84.45551601],
[0., 0., 0., 1.]])
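# This affine was pre-computed offline; a sketch of how it could be recomputed
# (slow) with the same helper used further below for the template registration:
# reg_affine, _ = mne.transforms.compute_volume_registration(CT_orig, T1, verbose=True)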
# use a cval='1%' here to make the values outside the domain of the CT
# the same as the background level during interpolation
CT_aligned = mne.transforms.apply_volume_registration(
CT_orig, T1, reg_affine, cval='1%')
plot_overlay(T1, CT_aligned, 'Aligned CT Overlaid on T1', thresh=0.95)
del CT_orig
# make low intensity parts of the CT transparent for easier visualization
CT_data = CT_aligned.get_fdata().copy()
CT_data[CT_data < np.quantile(CT_data, 0.95)] = np.nan
T1_data = np.asarray(T1.dataobj)
fig, axes = plt.subplots(1, 3, figsize=(12, 6))
for ax in axes:
ax.axis('off')
axes[0].imshow(T1_data[T1.shape[0] // 2], cmap='gray')
axes[0].set_title('MR')
axes[1].imshow(np.asarray(CT_aligned.dataobj)[CT_aligned.shape[0] // 2],
cmap='gray')
axes[1].set_title('CT')
axes[2].imshow(T1_data[T1.shape[0] // 2], cmap='gray')
axes[2].imshow(CT_data[CT_aligned.shape[0] // 2], cmap='gist_heat', alpha=0.5)
for ax in (axes[0], axes[2]):
ax.annotate('Subcutaneous fat', (110, 52), xytext=(100, 30),
color='white', horizontalalignment='center',
arrowprops=dict(facecolor='white'))
for ax in axes:
ax.annotate('Skull (dark in MR, bright in CT)', (40, 175),
xytext=(120, 246), horizontalalignment='center',
color='white', arrowprops=dict(facecolor='white'))
axes[2].set_title('CT aligned to MR')
fig.tight_layout()
del CT_data, T1
# estimate head->mri transform
subj_trans = mne.coreg.estimate_head_mri_t(
'sample_seeg', op.join(misc_path, 'seeg'))
# load electrophysiology data to find channel locations for
# (the channels are already located in the example)
raw = mne.io.read_raw(op.join(misc_path, 'seeg', 'sample_seeg_ieeg.fif'))
gui = mne.gui.locate_ieeg(raw.info, subj_trans, CT_aligned,
subject='sample_seeg',
subjects_dir=op.join(misc_path, 'seeg'))
# The `raw` object is modified to contain the channel locations
# after closing the GUI and can now be saved
# gui.close() # typically you close when done
T1_ecog = nib.load(op.join(misc_path, 'ecog', 'sample_ecog', 'mri', 'T1.mgz'))
CT_orig_ecog = nib.load(op.join(misc_path, 'ecog', 'sample_ecog_CT.mgz'))
# pre-computed affine from `mne.transforms.compute_volume_registration`
reg_affine = np.array([
[0.99982382, -0.00414586, -0.01830679, 0.15413965],
[0.00549597, 0.99721885, 0.07432601, -1.54316131],
[0.01794773, -0.07441352, 0.99706595, -1.84162514],
[0., 0., 0., 1.]])
# align CT
CT_aligned_ecog = mne.transforms.apply_volume_registration(
CT_orig_ecog, T1_ecog, reg_affine, cval='1%')
raw_ecog = mne.io.read_raw(op.join(misc_path, 'ecog', 'sample_ecog_ieeg.fif'))
# use estimated `trans` which was used when the locations were found previously
subj_trans_ecog = mne.coreg.estimate_head_mri_t(
'sample_ecog', op.join(misc_path, 'ecog'))
gui = mne.gui.locate_ieeg(raw_ecog.info, subj_trans_ecog, CT_aligned_ecog,
subject='sample_ecog',
subjects_dir=op.join(misc_path, 'ecog'))
# plot projected sensors
brain_kwargs = dict(cortex='low_contrast', alpha=0.2, background='white')
brain = mne.viz.Brain('sample_ecog', subjects_dir=op.join(misc_path, 'ecog'),
title='Before Projection', **brain_kwargs)
brain.add_sensors(raw_ecog.info, trans=subj_trans_ecog)
view_kwargs = dict(azimuth=60, elevation=100, distance=350,
focalpoint=(0, 0, -15))
brain.show_view(**view_kwargs)
# project sensors to the brain surface
raw_ecog.info = mne.preprocessing.ieeg.project_sensors_onto_brain(
raw_ecog.info, subj_trans_ecog, 'sample_ecog',
subjects_dir=op.join(misc_path, 'ecog'))
# plot projected sensors
brain = mne.viz.Brain('sample_ecog', subjects_dir=op.join(misc_path, 'ecog'),
title='After Projection', **brain_kwargs)
brain.add_sensors(raw_ecog.info, trans=subj_trans_ecog)
brain.show_view(**view_kwargs)
# plot the alignment
brain = mne.viz.Brain('sample_seeg', subjects_dir=op.join(misc_path, 'seeg'),
**brain_kwargs)
brain.add_sensors(raw.info, trans=subj_trans)
brain.show_view(**view_kwargs)
# load the subject's brain and the Freesurfer "fsaverage" template brain
subject_brain = nib.load(
op.join(misc_path, 'seeg', 'sample_seeg', 'mri', 'brain.mgz'))
template_brain = nib.load(
op.join(subjects_dir, 'fsaverage', 'mri', 'brain.mgz'))
plot_overlay(template_brain, subject_brain,
'Alignment with fsaverage before Affine Registration')
zooms = dict(translation=10, rigid=10, affine=10, sdr=5)
reg_affine, sdr_morph = mne.transforms.compute_volume_registration(
subject_brain, template_brain, zooms=zooms, verbose=True)
subject_brain_sdr = mne.transforms.apply_volume_registration(
subject_brain, template_brain, reg_affine, sdr_morph)
# apply the transform to the subject brain to plot it
plot_overlay(template_brain, subject_brain_sdr,
'Alignment with fsaverage after SDR Registration')
del subject_brain, template_brain
# first we need our montage but it needs to be converted to "mri" coordinates
# using our ``subj_trans``
montage = raw.get_montage()
montage.apply_trans(subj_trans)
montage_warped, elec_image, warped_elec_image = mne.warp_montage_volume(
montage, CT_aligned, reg_affine, sdr_morph, thresh=0.25,
subject_from='sample_seeg', subjects_dir_from=op.join(misc_path, 'seeg'),
subject_to='fsaverage', subjects_dir_to=subjects_dir)
fig, axes = plt.subplots(2, 1, figsize=(8, 8))
nilearn.plotting.plot_glass_brain(elec_image, axes=axes[0], cmap='Dark2')
fig.text(0.1, 0.65, 'Subject T1', rotation='vertical')
nilearn.plotting.plot_glass_brain(warped_elec_image, axes=axes[1],
cmap='Dark2')
fig.text(0.1, 0.25, 'fsaverage', rotation='vertical')
fig.suptitle('Electrodes warped to fsaverage')
del CT_aligned
# first we need to add fiducials so that we can define the "head" coordinate
# frame in terms of them (with the origin at the center between LPA and RPA)
montage_warped.add_estimated_fiducials('fsaverage', subjects_dir)
# compute the head<->mri ``trans`` now using the fiducials
template_trans = mne.channels.compute_native_head_t(montage_warped)
# now we can set the montage and, because there are fiducials in the montage,
# the montage will be properly transformed to "head" coordinates when we do
# (this step uses ``template_trans`` but it is recomputed behind the scenes)
raw.set_montage(montage_warped)
# plot the resulting alignment
brain = mne.viz.Brain('fsaverage', subjects_dir=subjects_dir, **brain_kwargs)
brain.add_sensors(raw.info, trans=template_trans)
brain.show_view(**view_kwargs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Aligning the T1 to ACPC
Step3: Freesurfer recon-all
Step4: Now we need to align our CT image to the T1 image.
Step5: Note: alignment failures sometimes occur, which require manual pre-alignment.
Step6: Now we need to estimate the "head" coordinate transform.
Step7: Marking the Location of Each Electrode Contact
Step8: Let's do a quick sidebar and show what this looks like for ECoG as well.
Step9: For ECoG, we typically want to account for "brain shift", or shrinking of the brain away from the skull due to the craniotomy.
Step10: Now, let's project the sensors to the brain surface and re-plot them.
Step11: Let's plot the electrode contact locations on the subject's brain.
Step12: Warping to a Common Atlas
Step13: Now, we'll register the affine of the subject's brain to the template brain.
Step14: Finally, we'll apply the registrations to the electrode contact coordinates.
Step15: We can now plot the result. You can compare this to the plot in
|
6,850
|
<ASSISTANT_TASK:>
Python Code:
import pymc3 as pm
import seaborn as sns
import scipy
import scipy.stats
import numpy as np
import spacepy.toolbox as tb
import matplotlib.pyplot as plt
from theano import theano, tensor as tt
sns.set(font_scale=1.5)
from pymc3.distributions.dist_math import bound
def pot1(z):
z = z.T
return .5*((z.norm(2, axis=0)-2.)/.4)**2 - tt.log(tt.exp(-.5*((z[0]-2.)/.6)**2) +
tt.exp(-.5*((z[0]+2.)/.6)**2))
def cust_logp(z):
# this works to cut things off, useful or not but 0 is a nature place a lot of times
return bound(-pot1(z), z>-5, z<5)
# return -pot1(z)
with pm.Model() as pot1m:
pm.DensityDist('pot1', logp=cust_logp, shape=(2,))
trace = pm.sample(1000)
pm.traceplot(trace);
pm.plot_posterior(trace);
dist = scipy.stats.rayleigh(loc=2, scale=20)
rvs = dist.rvs(10000)
sns.distplot(rvs)
# need to make a callable function of the histogram of this.
h, b = np.histogram(rvs, 30)
b = tb.bin_edges_to_center(b)
plt.plot(b, h)
f = scipy.interpolate.interp1d(b,h, bounds_error=False, fill_value=0)
XX = np.linspace(0, 100, 1000)
plt.plot(XX, f(XX))
from theano.tensor import _shared
# @theano.as_op(itypes=[theano.tensor.dscalar],otypes=[theano.tensor.dscalar])
def dens_fn(x):
return tt.log(f(x) )
# dens_fn(_shared(np.linspace(-1, 5, 10)))
with pm.Model() as model:
dens = pm.DensityDist('density', logp=dens_fn)
trace = pm.sample(5000)
pm.traceplot(trace);
pm.plot_posterior(trace);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Given example
Step2: Now can we do this from a histogram?
|
6,851
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = list(range(num_training, num_training + num_validation))
X_val = X_train[mask]
y_val = y_train[mask]
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [5e4, 5e5, 5e6]
results = {}
best_val = -1
best_svm = None
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifier in best_svm. You might also want to play          #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
################################################################################
pass
################################################################################
# END OF YOUR CODE #
################################################################################
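# A hypothetical sketch of one way to fill in the TODO above (not the official
# solution); it assumes LinearSVM exposes the train()/predict() interface used
# elsewhere in the cs231n scaffolding, and it reuses the grids defined above.
for lr in learning_rates:
    for reg in regularization_strengths:
        svm = LinearSVM()
        svm.train(X_train_feats, y_train, learning_rate=lr, reg=reg, num_iters=1500)
        train_accuracy = np.mean(svm.predict(X_train_feats) == y_train)
        val_accuracy = np.mean(svm.predict(X_val_feats) == y_val)
        results[(lr, reg)] = (train_accuracy, val_accuracy)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_svm = svm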
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print(test_accuracy)
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
print(X_train_feats.shape)
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to #
# cross-validate various parameters as in previous sections. Store your best #
# model in the best_net variable. #
################################################################################
pass
################################################################################
# END OF YOUR CODE #
################################################################################
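# A hypothetical sketch for the TODO above (not the official solution); it assumes
# TwoLayerNet provides the train()/predict() methods from the course's neural_net.py.
best_val_acc = -1
for lr in [0.5, 1.0]:
    for reg in [1e-4, 1e-3]:
        candidate = TwoLayerNet(input_dim, hidden_dim, num_classes)
        candidate.train(X_train_feats, y_train, X_val_feats, y_val,
                        num_iters=2000, batch_size=200, learning_rate=lr,
                        learning_rate_decay=0.95, reg=reg, verbose=False)
        val_acc = np.mean(candidate.predict(X_val_feats) == y_val)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_net = net = candidate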
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
test_acc = (net.predict(X_test_feats) == y_test).mean()
print(test_acc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Extract Features
Step3: Train SVM on features
Step4: Inline question 1
|
6,852
|
<ASSISTANT_TASK:>
Python Code:
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print("Downloaded " + local)
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkstats2.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkplot.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/nsfg.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dct")
download(
"https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dat.gz"
)
import nsfg
preg = nsfg.ReadFemPreg()
preg.head()
preg.columns
preg.columns[1]
pregordr = preg['pregordr']
type(pregordr)
pregordr
pregordr[0]
pregordr[2:5]
pregordr = preg.pregordr
preg.outcome.value_counts().sort_index()
preg.birthwgt_lb.value_counts().sort_index()
caseid = 10229
preg_map = nsfg.MakePregMap(preg)
indices = preg_map[caseid]
preg.outcome[indices].values
preg.birthord.isnull().sum()
preg.totalwgt_lb.mean()
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemResp.dct")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemResp.dat.gz")
resp = nsfg.ReadFemResp()
resp.head()
resp[resp.caseid==2298]
preg[preg.caseid==2298]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples from Chapter 1
Step2: Print the column names.
Step3: Select a single column name.
Step4: Select a column and check what type it is.
Step5: Print a column.
Step6: Select a single element from a column.
Step7: Select a slice from a column.
Step8: Select a column using dot notation.
Step9: Count the number of times each value occurs.
Step10: Check the values of another variable.
Step11: Make a dictionary that maps from each respondent's caseid to a list of indices into the pregnancy DataFrame. Use it to select the pregnancy outcomes for a single respondent.
Step12: Exercises
Step13: Select the prglngth column, print the value counts, and compare to results published in the codebook
Step14: Create a new column named totalwgt_kg that contains birth weight in kilograms. Compute its mean. Remember that when you create a new column, you have to use dictionary syntax, not dot notation (a short sketch follows this step list).
Step15: DataFrame provides a method head that displays the first five rows
Step16: Select the age_r column from resp and print the value counts. How old are the youngest and oldest respondents?
Step17: And we can get the corresponding rows from preg like this
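A minimal sketch of the dictionary-syntax point from Step14, on toy data rather than the NSFG file (column name and values are illustrative only):
import pandas as pd
toy = pd.DataFrame({'totalwgt_lb': [7.5, 8.2, 6.9]})
toy['totalwgt_kg'] = toy['totalwgt_lb'] * 0.453592  # bracket syntax adds a real column
print(toy['totalwgt_kg'].mean())
# toy.totalwgt_kg = ... would only attach an attribute, not create a column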
|
6,853
|
<ASSISTANT_TASK:>
Python Code:
%%bash
ipython profile create blake
mkdir /Users/houghb/.ipython/profile_blake/static/
mkdir /Users/houghb/.ipython/profile_blake/static/custom/
touch /Users/houghb/.ipython/profile_blake/static/custom/custom.css
%%file /Users/houghb/.ipython/profile_blake/static/custom/custom.css
/**This is Blake's custom css file**/
div.input{
width:107ex; /* on my system this is an 80 char window */
}
%%file /Users/houghb/.jupyter/custom/custom.css
/**This is Blake's custom css file**/
div.input{
width:107ex; /* on my system this is an 80 char window */
}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The file that you edit in the next cell (custom.css) will change how your notebook cells look.
Step2: To use your newly created custom.css you need to start the IPython notebook with the command shown below.
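For the 'blake' profile created above, a plausible form of that command on the legacy IPython notebook (the exact flags depend on your version) is:
ipython notebook --profile=blake
On recent Jupyter releases the profile flag no longer applies; ~/.jupyter/custom/custom.css, edited in the last cell, is picked up automatically by a plain jupyter notebook.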
|
6,854
|
<ASSISTANT_TASK:>
Python Code:
# Note - these lines added to make it work with shared Jupyter Hub instance,
# modifying the system path so that locally installed modules installed with the shell commands below will be found -
# they would need to be modified for your instance, or to install the modules normally remove the --user param
# import sys
# import os
# sys.path.append(os.path.abspath("/...path to your local module install dir..."))
# these are the values we want to test
text1 = 'General Electric Company'
text2 = 'General Electric Co Inc'
import re, math
from collections import Counter
WORD = re.compile(r'\w+')
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
vector1 = text_to_vector(text1)
vector2 = text_to_vector(text2)
cosine = get_cosine(vector1, vector2)
print ('Cosine:', cosine)
import difflib
from difflib import SequenceMatcher
m = SequenceMatcher(None, text1, text2)
print (m.ratio())
## %%sh
## pip install fuzzywuzzy --user
## %%sh
## pip install python-Levenshtein --user
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
print (fuzz.ratio(text1, text2))
print (fuzz.partial_ratio(text1, text2))
print (fuzz.token_sort_ratio(text1, text2))
print (fuzz.token_set_ratio(text1, text2))
import numpy as np
# Jaccard Similarity J (A,B) = | Intersection (A,B) | / | Union (A,B) |
def compute_jaccard_similarity_score(x, y):
intersection_cardinality = len(set(x).intersection(set(y)))
union_cardinality = len(set(x).union(set(y)))
return intersection_cardinality / float(union_cardinality)
score = compute_jaccard_similarity_score(text1, text2)
print ("Jaccard Similarity Score: ",score)
## %%sh
## pip install jellyfish --user
import jellyfish
jellyfish.levenshtein_distance(text1,text2)
jellyfish.damerau_levenshtein_distance(text1,text2)
jellyfish.jaro_distance(text1,text2)
jellyfish.jaro_winkler(text1,text2)
jellyfish.match_rating_comparison(text1,text2)
jellyfish.hamming_distance(text1,text2)
jellyfish.soundex(text1)
jellyfish.soundex(text2)
soundexenc = ''
sentence=text1.split()
for word in sentence:
soundexenc = soundexenc+' '+jellyfish.soundex(word)
print(soundexenc)
jellyfish.metaphone(text1)
jellyfish.metaphone(text2)
jellyfish.metaphone(text1) == jellyfish.metaphone(text2)
jellyfish.nysiis(text1)
jellyfish.nysiis(text2)
jellyfish.nysiis(text1) == jellyfish.nysiis(text2)
nysiisenc = ''
sentence=text2.split()
for word in sentence:
nysiisenc = nysiisenc+' '+jellyfish.nysiis(word)
print(nysiisenc)
jellyfish.match_rating_codex(text1)
jellyfish.match_rating_codex(text2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cosine Similarity
Step2: String comparison using cosine similarity https
Step3: Cosine works fine with whole words and word transposition, but it will start to trip up on CO vs COMPANY and when too much extraneous text is introduced (see the short sketch at the end of this step list).
Step4: May have some issues when it comes to partial string matches http
Step5: Also wants to use python-Levenshtein to improve speed, but install failed on gcc - will complain below
Step6: Fuzzywuzzy has an interesting "process" function
Step7: taken from https
Step8: Note that soundex and nysiis both appear to just take the first word
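A small self-contained illustration of the Step3 caveat (a toy re-implementation of the word-count cosine, not the notebook's own helper):
import re, math
from collections import Counter
def word_cosine(a, b):
    va, vb = Counter(re.findall(r'\w+', a)), Counter(re.findall(r'\w+', b))
    num = sum(va[k] * vb[k] for k in va.keys() & vb.keys())
    den = math.sqrt(sum(v * v for v in va.values())) * math.sqrt(sum(v * v for v in vb.values()))
    return num / den if den else 0.0
print(word_cosine('General Electric Company', 'Electric General Company'))  # 1.0: word order is ignored
print(word_cosine('General Electric Company', 'General Electric Co'))       # ~0.67: 'Co' and 'Company' do not match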
|
6,855
|
<ASSISTANT_TASK:>
Python Code:
from pyspark import SparkContext
sc = SparkContext(master = 'local')
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.appName("Python Spark SQL basic example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
mtcars = spark.read.csv(path='data/mtcars.csv',
sep=',',
encoding='UTF-8',
comment=None,
header=True,
inferSchema=True)
mtcars.show(n=5, truncate=False)
from pyspark.sql import Row
rdd = sc.parallelize([
Row(x=[1,2,3], y=['a','b','c']),
Row(x=[4,5,6], y=['e','f','g'])
])
rdd.collect()
df = spark.createDataFrame(rdd)
df.show()
import pandas as pd
pdf = pd.DataFrame({
'x': [[1,2,3], [4,5,6]],
'y': [['a','b','c'], ['e','f','g']]
})
pdf
df = spark.createDataFrame(pdf)
df.show()
my_list = [['a', 1], ['b', 2]]
df = spark.createDataFrame(my_list, ['letter', 'number'])
df.show()
df.dtypes
my_list = [['a', 1], ['b', 2]]
df = spark.createDataFrame(my_list, ['my_column'])
df.show()
df.dtypes
my_list = [(['a', 1], ['b', 2])]
df = spark.createDataFrame(my_list, ['x', 'y'])
df.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a DataFrame object
Step2: Create DataFrame with createDataFrame function
Step3: From pandas DataFrame
Step4: From a list
Step5: The following code generates a DataFrame consisting of two columns, each of which is a vector column.
|
6,856
|
<ASSISTANT_TASK:>
Python Code:
#from imp import *
#s=load_source('sygma','/home/nugrid/nugrid/SYGMA/SYGMA_online/SYGMA_dev/sygma.py')
#import mpld3
#mpld3.enable_notebook()
import sygma as s
reload(s)
import matplotlib.pyplot as plt
%matplotlib inline
s1=s.sygma(iniZ=0.02,dt=1e7,tend=2e7)
s1.plot_yield_input() #[1,3,5,12][Fe/H]
s1.plot_yield_input(fig=2,xaxis='mini',yaxis='[Fe/H]',iniZ=0.0001,masses=[1,3,12,25],marker='s',color='r',shape='-')
s1.plot_yield_input(fig=3,xaxis='[C/H]',yaxis='[Fe/H]',iniZ=0.0001,masses=[1,3,12,25],marker='x',color='b',shape='--')
s1.plot_mass()
s1.plot_mass(specie='N',shape='--',marker='x')
#s1.plot_mass_multi()
#s1.plot_mass_multi(fig=1,specie=['C','N'],ylims=[],source='all',norm=False,label=[],shape=['-','--'],marker=['o','D'],color=['r','b'],markevery=20)
#plt.legend()
s1.plot_massfrac()
s1.plot_massfrac(yaxis='He-4',shape='--',marker='x')
s1.plot_spectro()
s1.plot_spectro(yaxis='[O/Fe]',marker='x',shape='--')
s1.plot_totmasses()
s1.plot_totmasses(source='agb',shape='--',marker='x')
s1.plot_totmasses(mass='stars',shape=':',marker='^')
import sygma as s
reload(s)
s1=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',imf_bdys=[1,30],special_timesteps=-1,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=True, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn',pop3_table='yield_tables/popIII_h1.txt')
#s1.plot_sn_distr(rate=True,label1='SN1a, rate',label2='SNII, rate',marker1='o',marker2='s')
s1.plot_sn_distr(fig=4,rate=False,label1='SN1a, number',label2='SNII number',marker1='d',marker2='p')
##plt.xlim(1e6,1e10)
#plt.ylabel('Number/Rate')
s1.plot_sn_distr()
s1.plot_sn_distr(fig=5,rate=True,rate_only='',xaxis='time',label1='SN1a',label2='SN2',shape1=':',shape2='--',marker1='o',marker2='s',color1='k',color2='b',markevery=20)
#s1=s.sygma(iolevel=0,mgal=1e11,dt=1e6,tend=1.3e10,imf_type='salpeter',imf_bdys=[1,30],special_timesteps=-1,iniZ=-1,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=True, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn',pop3_table='yield_tables/popIII_h1.txt')
#s1.plot_sn_distr(rate=True,label1='SN1a, rate',label2='SNII, rate',marker1='o',marker2='s')
#s1.plot_sn_distr(rate=False,label1='SN1a, number',label2='SNII number',marker1='d',marker2='p')
#plt.xlim(1e6,1e10)
#plt.ylabel('Number/Rate')
#s1=s.sygma(iniZ=0.0001,dt=1e9,tend=2e9)
#s2=s.sygma(iniZ=0.02)#,dt=1e7,tend=2e9)
reload(s)
s1=s.sygma(iolevel=0,iniZ=0.02,dt=1e8,tend=1e9) # standard not working
#s2=s.sygma(iniZ=0.02,dt=1e8,tend=1e10)
s1.plot_mass_range_contributions()
s1.plot_mass_range_contributions(fig=7,specie='O',rebin=0.5,label='',shape='-',marker='o',color='b',markevery=20,extralabel=False,log=False)
#s1.plot_mass_range_contributions(fig=7,specie='O',prodfac=True,rebin=0.5,label='',shape='-',marker='o',color='r',markevery=20,extralabel=False,log=False)
import sygma as s
reload(s)
ssp1=s.sygma(iolevel=0,dt=1e8,mgal=1e11,starbursts=[0.1,0.1],tend=1e9,special_timesteps=-1,imf_type='kroupa',imf_bdys=[0.1,100],sn1a_on=False,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_h1.txt', sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
ssp1.plot_star_formation_rate()
ssp1.plot_star_formation_rate(fig=6,marker='o',shape=':')
ssp1.plot_mass_range_contributions(fig=7,specie='H',prodfac=False,rebin=-1,time=-1,label='Total burst',shape='-',marker='o',color='r',markevery=20,extralabel=False,log=False)
ssp1.plot_mass_range_contributions(fig=7,specie='H',prodfac=False,rebin=-1,time=1e8,label='Burst at 1e8',shape='-',marker='o',color='b',markevery=20,extralabel=False,log=False)
#s1.write_evol_table(elements=['H','He','C'])
s1.write_evol_table(elements=['H'],isotopes=['H-1'],table_name='gce_table.txt',interact=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: plot_yield_input
Step2: The following commands plot the ISM metallicity in spectroscopic notation.
Step3: s1.plot_massfrac
Step4: s1.plot_spectro
Step5: s1.plot_totmasses
Step6: Test of SNIa and SNII rate plots
Step7: One point at the beginning for only 1 starburst
Step8: plot_mass_range_contributions
Step9: Tests with two starbursts
Step10: write_evol_table
|
6,857
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
from phoebe import u # units
logger = phoebe.logger()
b = phoebe.default_binary()
b.get_setting()
b['setting']
b['dict_set_all@setting']
b['teff@component']
b.set_value_all('teff@component', 4000)
print(b['value@teff@primary@component'], b['value@teff@secondary@component'])
b['dict_set_all@setting'] = True
b['teff@component'] = 8000
print(b['value@teff@primary@component'], b['value@teff@secondary@component'])
b.set_value_all('teff@component', 6000)
b['dict_set_all@setting'] = False
b['incl']
b['dict_filter@setting'] = {'context': 'component'}
b['incl']
b.filter(qualifier='incl')
b.set_value('dict_filter@setting', {})
b['run_checks_compute@setting']
b.add_dataset('lc')
b.add_compute('legacy')
print(b.run_checks())
b['run_checks_compute@setting'] = ['phoebe01']
print(b.run_checks())
b['auto_add_figure']
b['auto_add_figure'].description
b['auto_remove_figure']
b['auto_remove_figure'].description
b['web_client']
b['web_client'].description
b['web_client_url']
b['web_client_url'].description
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle.
Step2: Accessing Settings
Step3: or via filtering/twig access
Step4: and can be set as any other Parameter in the Bundle
Step5: In our default binary there are temperatures ('teff') parameters for each of the components ('primary' and 'secondary'). If we were to do
Step6: If you want dictionary access to use set_value_all instead of set_value, you can enable this parameter
Step7: Now let's disable this so it doesn't confuse us while looking at the other options
Step8: dict_filter
Step9: In our default binary, there are several inclination parameters - one for each component ('primary', 'secondary', 'binary') and one with the constraint context (to keep the inclinations aligned).
Step10: Now we no longer see the constraint parameters.
Step11: Now let's reset this option... keeping in mind that we no longer have access to the 'setting' context through twig access, we'll have to use methods to clear the dict_filter
Step12: run_checks_compute (/figure/solver/solution)
Step13: auto_add_figure, auto_remove_figure
Step14: web_client, web_client_url
|
6,858
|
<ASSISTANT_TASK:>
Python Code:
# In Google Colab, uncomment this:
# !wget https://bit.ly/2FMJP5K -O setup.py && bash setup.py
# XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import gym
import numpy as np
import pandas as pd
env = gym.make("Taxi-v2")
env.reset()
env.render()
n_states = env.observation_space.n
n_actions = env.action_space.n
print("n_states=%i, n_actions=%i" % (n_states, n_actions))
policy = np.ones(shape=(n_states, n_actions)) * 1 / n_actions
assert type(policy) in (np.ndarray, np.matrix)
assert np.allclose(policy, 1./n_actions)
assert np.allclose(np.sum(policy, axis=1), 1)
def generate_session(policy, t_max=10**4):
Play game until end or for t_max ticks.
:param policy: an array of shape [n_states,n_actions] with action probabilities
:returns: list of states, list of actions and sum of rewards
states, actions = [], []
total_reward = 0.
s = env.reset()
def sample_action(policy, s):
action_p = policy[s, :].reshape(-1,)
#highest_p_actions = np.argwhere(action_p == np.amax(action_p)).reshape(-1,)
#non_zero_p_actions = np.argwhere(action_p > 0).reshape(-1,)
#random_choice = np.random.choice(highest_p_actions)
#random_choice = np.random.choice(non_zero_p_actions)
random_choice = np.random.choice(np.arange(len(action_p)), p=action_p)
return random_choice
for t in range(t_max):
a = sample_action(policy, s) #<sample action from policy(hint: use np.random.choice) >
new_s, r, done, info = env.step(a)
# Record state, action and add up reward to states,actions and total_reward accordingly.
states.append(s)
actions.append(a)
total_reward += r
s = new_s
if done:
break
return states, actions, total_reward
s, a, r = generate_session(policy)
assert type(s) == type(a) == list
assert len(s) == len(a)
assert type(r) in [float, np.float]
# let's see the initial reward distribution
import matplotlib.pyplot as plt
%matplotlib inline
sample_rewards = [generate_session(policy, t_max=1000)[-1] for _ in range(200)]
plt.hist(sample_rewards, bins=20)
plt.vlines([np.percentile(sample_rewards, 50)], [0], [100], label="50'th percentile", color='green')
plt.vlines([np.percentile(sample_rewards, 90)], [0], [100], label="90'th percentile", color='red')
plt.legend()
def select_elites(states_batch, actions_batch, rewards_batch, percentile=50):
Select states and actions from games that have rewards >= percentile
:param states_batch: list of lists of states, states_batch[session_i][t]
:param actions_batch: list of lists of actions, actions_batch[session_i][t]
:param rewards_batch: list of rewards, rewards_batch[session_i]
:returns: elite_states,elite_actions, both 1D lists of states and respective actions from elite sessions
Please return elite states and actions in their original order
[i.e. sorted by session number and timestep within session]
If you are confused, see examples below. Please don't assume that states are integers
(they will become different later).
#<Compute minimum reward for elite sessions. Hint: use np.percentile >
reward_threshold = np.percentile(rewards_batch, percentile)
#elite_states = <your code here >
#elite_actions = <your code here >
elite_states = []
elite_actions = []
for i, reward in enumerate(rewards_batch):
if reward >= reward_threshold:
elite_states = elite_states + states_batch[i]
elite_actions = elite_actions + actions_batch[i]
return elite_states, elite_actions
states_batch = [
[1, 2, 3], # game1
[4, 2, 0, 2], # game2
[3, 1], # game3
]
actions_batch = [
[0, 2, 4], # game1
[3, 2, 0, 1], # game2
[3, 3], # game3
]
rewards_batch = [
3, # game1
4, # game2
5, # game3
]
test_result_0 = select_elites(
states_batch, actions_batch, rewards_batch, percentile=0)
test_result_40 = select_elites(
states_batch, actions_batch, rewards_batch, percentile=30)
test_result_90 = select_elites(
states_batch, actions_batch, rewards_batch, percentile=90)
test_result_100 = select_elites(
states_batch, actions_batch, rewards_batch, percentile=100)
assert np.all(test_result_0[0] == [1, 2, 3, 4, 2, 0, 2, 3, 1]) \
and np.all(test_result_0[1] == [0, 2, 4, 3, 2, 0, 1, 3, 3]),\
"For percentile 0 you should return all states and actions in chronological order"
assert np.all(test_result_40[0] == [4, 2, 0, 2, 3, 1]) and \
np.all(test_result_40[1] == [3, 2, 0, 1, 3, 3]),\
"For percentile 30 you should only select states/actions from two first"
assert np.all(test_result_90[0] == [3, 1]) and \
np.all(test_result_90[1] == [3, 3]),\
"For percentile 90 you should only select states/actions from one game"
assert np.all(test_result_100[0] == [3, 1]) and\
np.all(test_result_100[1] == [3, 3]),\
"Please make sure you use >=, not >. Also double-check how you compute percentile."
print("Ok!")
def update_policy(elite_states, elite_actions):
Given old policy and a list of elite states/actions from select_elites,
return new updated policy where each action probability is proportional to
policy[s_i,a_i] ~ #[occurrences of si and ai in elite states/actions]
Don't forget to normalize policy to get valid probabilities and handle 0/0 case.
In case you never visited a state, set probabilities for all actions to 1./n_actions
:param elite_states: 1D list of states from elite sessions
:param elite_actions: 1D list of actions from elite sessions
new_policy = np.zeros([n_states, n_actions])
#<Your code here: update probabilities for actions given elite states & actions >
# Don't forget to set 1/n_actions for all actions in unvisited states.
for state, action in zip(elite_states, elite_actions):
new_policy[state, action] = new_policy[state, action] + 1
for state in range(n_states):
s = np.sum(new_policy[state, :])
if s == 0:
new_policy[state, :] = 1. / n_actions
else:
new_policy[state, :] = new_policy[state, :] / s
return new_policy
elite_states = [1, 2, 3, 4, 2, 0, 2, 3, 1]
elite_actions = [0, 2, 4, 3, 2, 0, 1, 3, 3]
new_policy = update_policy(elite_states, elite_actions)
assert np.isfinite(new_policy).all(
), "Your new policy contains NaNs or +-inf. Make sure you don't divide by zero."
assert np.all(
new_policy >= 0), "Your new policy can't have negative action probabilities"
assert np.allclose(new_policy.sum(
axis=-1), 1), "Your new policy should be a valid probability distribution over actions"
reference_answer = np.array([
[1., 0., 0., 0., 0.],
[0.5, 0., 0., 0.5, 0.],
[0., 0.33333333, 0.66666667, 0., 0.],
[0., 0., 0., 0.5, 0.5]])
assert np.allclose(new_policy[:4, :5], reference_answer)
print("Ok!")
from IPython.display import clear_output
def show_progress(rewards_batch, log, percentile, reward_range=[-990, +10]):
A convenience function that displays training progress.
No cool math here, just charts.
mean_reward = np.mean(rewards_batch)
threshold = np.percentile(rewards_batch, percentile)
log.append([mean_reward, threshold])
clear_output(True)
print("mean reward = %.3f, threshold=%.3f" % (mean_reward, threshold))
plt.figure(figsize=[8, 4])
plt.subplot(1, 2, 1)
plt.plot(list(zip(*log))[0], label='Mean rewards')
plt.plot(list(zip(*log))[1], label='Reward thresholds')
plt.legend()
plt.grid()
plt.subplot(1, 2, 2)
plt.hist(rewards_batch, range=reward_range)
plt.vlines([np.percentile(rewards_batch, percentile)],
[0], [100], label="percentile", color='red')
plt.legend()
plt.grid()
plt.show()
# reset policy just in case
policy = np.ones([n_states, n_actions]) / n_actions
n_sessions = 250 # sample this many sessions
percentile = 30 # take this percent of session with highest rewards
learning_rate = 0.5  # weight given to the new policy when blending it with the old one
log = []
for i in range(100):
%time sessions = [generate_session(policy) for x in range(n_sessions)] #[ < generate a list of n_sessions new sessions > ]
states_batch, actions_batch, rewards_batch = zip(*sessions)
elite_states, elite_actions = select_elites(states_batch, actions_batch, rewards_batch, percentile=percentile) #<select elite states/actions >
new_policy = update_policy(elite_states, elite_actions) #<compute new policy >
policy = learning_rate * new_policy + (1 - learning_rate) * policy
# display results on chart
show_progress(rewards_batch, log, percentile)
from submit import submit_taxi
submit_taxi(generate_session, policy, 'tonatiuh_rangel@hotmail.com', '7uvgN7bBzpJzVw9f')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create stochastic policy
Step3: Play the game
Step6: Crossentropy method steps
Step8: Training loop
Step9: Reflecting on results
|
6,859
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
# Define the Keras model to add callbacks to
def get_model():
model = keras.Sequential()
model.add(keras.layers.Dense(1, input_dim=784))
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=0.1),
loss="mean_squared_error",
metrics=["mean_absolute_error"],
)
return model
# Load example MNIST data and pre-process it
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
# Limit the data to 1000 samples
x_train = x_train[:1000]
y_train = y_train[:1000]
x_test = x_test[:1000]
y_test = y_test[:1000]
class CustomCallback(keras.callbacks.Callback):
def on_train_begin(self, logs=None):
keys = list(logs.keys())
print("Starting training; got log keys: {}".format(keys))
def on_train_end(self, logs=None):
keys = list(logs.keys())
print("Stop training; got log keys: {}".format(keys))
def on_epoch_begin(self, epoch, logs=None):
keys = list(logs.keys())
print("Start epoch {} of training; got log keys: {}".format(epoch, keys))
def on_epoch_end(self, epoch, logs=None):
keys = list(logs.keys())
print("End epoch {} of training; got log keys: {}".format(epoch, keys))
def on_test_begin(self, logs=None):
keys = list(logs.keys())
print("Start testing; got log keys: {}".format(keys))
def on_test_end(self, logs=None):
keys = list(logs.keys())
print("Stop testing; got log keys: {}".format(keys))
def on_predict_begin(self, logs=None):
keys = list(logs.keys())
print("Start predicting; got log keys: {}".format(keys))
def on_predict_end(self, logs=None):
keys = list(logs.keys())
print("Stop predicting; got log keys: {}".format(keys))
def on_train_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: start of batch {}; got log keys: {}".format(batch, keys))
def on_train_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: end of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: start of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: end of batch {}; got log keys: {}".format(batch, keys))
def on_predict_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Predicting: start of batch {}; got log keys: {}".format(batch, keys))
def on_predict_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Predicting: end of batch {}; got log keys: {}".format(batch, keys))
model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=1,
verbose=0,
validation_split=0.5,
callbacks=[CustomCallback()],
)
res = model.evaluate(
x_test, y_test, batch_size=128, verbose=0, callbacks=[CustomCallback()]
)
res = model.predict(x_test, batch_size=128, callbacks=[CustomCallback()])
class LossAndErrorPrintingCallback(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
print("For batch {}, loss is {:7.2f}.".format(batch, logs["loss"]))
def on_test_batch_end(self, batch, logs=None):
print("For batch {}, loss is {:7.2f}.".format(batch, logs["loss"]))
def on_epoch_end(self, epoch, logs=None):
print(
"The average loss for epoch {} is {:7.2f} "
"and mean absolute error is {:7.2f}.".format(
epoch, logs["loss"], logs["mean_absolute_error"]
)
)
model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=2,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)
res = model.evaluate(
x_test,
y_test,
batch_size=128,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)
import numpy as np
class EarlyStoppingAtMinLoss(keras.callbacks.Callback):
Stop training when the loss is at its min, i.e. the loss stops decreasing.
Arguments:
patience: Number of epochs to wait after the minimum has been hit. After this
many epochs with no improvement, training stops.
def __init__(self, patience=0):
super(EarlyStoppingAtMinLoss, self).__init__()
self.patience = patience
# best_weights to store the weights at which the minimum loss occurs.
self.best_weights = None
def on_train_begin(self, logs=None):
# The number of epoch it has waited when loss is no longer minimum.
self.wait = 0
# The epoch the training stops at.
self.stopped_epoch = 0
# Initialize the best as infinity.
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get("loss")
if np.less(current, self.best):
self.best = current
self.wait = 0
# Record the best weights if current results is better (less).
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
print("Restoring model weights from the end of the best epoch.")
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))
model = get_model()
model.fit(
x_train,
y_train,
batch_size=64,
steps_per_epoch=5,
epochs=30,
verbose=0,
callbacks=[LossAndErrorPrintingCallback(), EarlyStoppingAtMinLoss()],
)
class CustomLearningRateScheduler(keras.callbacks.Callback):
Learning rate scheduler which sets the learning rate according to schedule.
Arguments:
schedule: a function that takes an epoch index
(integer, indexed from 0) and current learning rate
as inputs and returns a new learning rate as output (float).
def __init__(self, schedule):
super(CustomLearningRateScheduler, self).__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, "lr"):
raise ValueError('Optimizer must have a "lr" attribute.')
# Get the current learning rate from model's optimizer.
lr = float(tf.keras.backend.get_value(self.model.optimizer.learning_rate))
# Call schedule function to get the scheduled learning rate.
scheduled_lr = self.schedule(epoch, lr)
# Set the value back to the optimizer before this epoch starts
tf.keras.backend.set_value(self.model.optimizer.lr, scheduled_lr)
print("\nEpoch %05d: Learning rate is %6.4f." % (epoch, scheduled_lr))
LR_SCHEDULE = [
# (epoch to start, learning rate) tuples
(3, 0.05),
(6, 0.01),
(9, 0.005),
(12, 0.001),
]
def lr_schedule(epoch, lr):
Helper function to retrieve the scheduled learning rate based on epoch.
if epoch < LR_SCHEDULE[0][0] or epoch > LR_SCHEDULE[-1][0]:
return lr
for i in range(len(LR_SCHEDULE)):
if epoch == LR_SCHEDULE[i][0]:
return LR_SCHEDULE[i][1]
return lr
model = get_model()
model.fit(
x_train,
y_train,
batch_size=64,
steps_per_epoch=5,
epochs=15,
verbose=0,
callbacks=[
LossAndErrorPrintingCallback(),
CustomLearningRateScheduler(lr_schedule),
],
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Writing your own callbacks
Step2: Overview of Keras callbacks
Step3: Then load the MNIST data for training and testing from the Keras datasets API.
Step4: Now define a simple custom callback that logs the following events.
Step5: Let's try it out.
Step6: Usage of the logs dict
Step8: Usage of the self.model attribute
Step11: Learning rate scheduling
|
6,860
|
<ASSISTANT_TASK:>
Python Code:
target = pd.read_csv('../data/train_target.csv')
target.describe()
target = target / 1000
sns.distplot(target);
plt.title('SalePrice')
import scipy as sp
sp.stats.skew(target)
sp.stats.skewtest(target)
logtarget = np.log1p(target)
print('skewness of logtarget = ', sp.stats.skew(logtarget)[0])
print('skewness test of logtarget = ', sp.stats.skewtest(logtarget))
sns.distplot(logtarget)
plt.title(r'log(1 + SalePrice)')
raw_train = pd.read_csv('../data/train_prepared_light.csv')
raw_test = pd.read_csv('../data/test_prepared_light.csv')
df = pd.concat([raw_train, raw_test], keys=['train', 'test'])
df.shape
ncategories = sum(df.dtypes == object)
ncategories
df.head()
df.tail()
df.columns, len(df.columns)
df.dtypes.unique()
is_categorical = (df.dtypes == object)
is_numerical = ~is_categorical
dfnum = df.loc[:, is_numerical].copy()
dfnum.columns, len(dfnum.columns)
dfnum.describe()
cols_with_nulls = dfnum.columns[dfnum.isnull().sum() > 0]
cols_with_nulls
dfnum.shape
dfnum[cols_with_nulls].isnull().sum().sort_values(ascending=False)
#.plot(kind='bar')
# We may want to refine this in the future. Perhaps build a model to predict the missing GarageCars from the other features?
median_list = 'LotFrontage', 'BsmtFullBath','BsmtHalfBath', 'GarageCars', 'GarageArea'
zero_list = 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'TotalBsmtSF', 'BsmtUnfSF'
for feature in median_list:
dfnum[feature].fillna(dfnum[feature].median(), inplace=True)
for feature in zero_list:
dfnum[feature].fillna(0, inplace=True)
dfnum.GarageYrBlt.fillna(dfnum.YearBuilt[dfnum.GarageYrBlt.isnull()], inplace=True)
# Check that we got rid of the nulls
dfnum.isnull().sum().any()
# Assign to the slice (see the copy / write problem in Pandas)
df.loc[:, is_numerical] = dfnum
def standardize(df):
return sk.preprocessing.StandardScaler().fit_transform(df)
dfnum_t = dfnum.apply(standardize)
dfnum_t.head()
def violinplot(df, ax=None):
if ax is None:
ax = plt.gca()
sns.violinplot(df, ax=ax)
for xlab in ax.get_xticklabels():
xlab.set_rotation(30)
def featureplot(df, nrows=1, figsize=(12,8), plotfunc=violinplot):
Plot the dataframe features
width, height = figsize
fig, axes = plt.subplots(nrows, 1, figsize=(width, height * nrows));
i = 0
plots_per_figure = df.shape[1] // nrows
if nrows == 1:
axes = [axes]
for j, ax in zip(range(plots_per_figure, df.shape[1] + 1, plots_per_figure), axes):
plotfunc(df.iloc[:, i:j], ax=ax)
i = j
dfnum_t.head()
train = dfnum.loc['train',:]
train_t = dfnum_t.loc['train',:]
featureplot(train_t.iloc[:, 0:9])
featureplot(train_t.iloc[:, 9:18])
fig, ax = plt.subplots(1,1, figsize=(4, 4))
sns.distplot(train_t['BsmtFinSF2'], ax=ax)
ax.set_title('Distribution of BsmtFinSF2')
def test_nearly_constant(series):
counts = series.value_counts()
max_val_count = max(counts)
other_val_count = counts.drop(counts.argmax()).sum()
return other_val_count / max_val_count < 0.25
is_nearly_constant = train_t.apply(test_nearly_constant)
is_nearly_constant.value_counts()
dropme = train_t.columns[is_nearly_constant]
dropme
df = df.drop(dropme, axis=1)
train = train.drop(dropme, axis=1)
train_t = train_t.drop(dropme, axis=1)
fig, axes = plt.subplots(1,2, figsize=(8, 4))
sns.distplot(train['LotArea'], ax=axes[0])
sns.distplot(np.log1p(train['LotArea']), ax=axes[1])
zfactors = sp.stats.skewtest(train)[0]
sns.distplot(zfactors)
is_skewed = np.abs(zfactors) > 10
pd.Series(data=zfactors, index=train.columns)[is_skewed].sort_values().plot(kind='barh')
plt.title('Z-factor for skewtest')
assert all(np.sign(sp.stats.skew(train)[is_skewed]) > 0)
def transform_skewed_colums(dfnum, is_skewed=is_skewed):
dfnum: dataframe to transform
dropme: columns to drop
is_skewed: iterable of length dfnum.columns indicating if a column is skewed
dfnum2 = dfnum.copy()
for feature, skewed_feature in zip(dfnum.columns, is_skewed):
if skewed_feature:
dfnum2[feature] = np.log1p(dfnum[feature])
dfnum_t2 = dfnum2.apply(standardize)
return dfnum_t2
# the transformed dataset has fewer columns and we only want those
dfnum_t2 = transform_skewed_colums(df.loc[:, is_numerical])
dfnum_t2.iloc[:, is_skewed].columns
zfactors2 = sp.stats.skewtest(dfnum_t2)[0]
pd.Series(data=zfactors2, index=dfnum_t2.columns)[is_skewed].sort_values().plot(kind='barh')
featureplot(dfnum_t2.iloc[:, is_skewed], nrows=2, figsize=(10,5))
featureplot(dfnum_t2.iloc[:, ~is_skewed], nrows=2, figsize=(10, 5))
dfnum_t2.index.names = ['Dataset', 'Id']
dfnum_t2.head()
dfnum_t2.to_csv('transformed_dataset_dfnum_t2.csv', index=True)
nfeatures = dfnum_t2.columns
target_t = logtarget.apply(standardize)
target_t.head()
dfnum_t2.head()
corr = pd.DataFrame(data=dfnum_t2.loc['train',:].apply(lambda feature: sp.stats.pearsonr(feature, target_t['SalePrice'])),
columns=['pearsonr'])
corr = corr.assign(correlation=corr.applymap(lambda x: x[0]),
pvalue=corr.applymap(lambda x: x[1]))
corr = corr.drop('pearsonr', axis=1)
corr.head()
corr.sort_values('pvalue', ascending=False)['correlation'].plot(kind='barh')
corr.sort_values('pvalue').head()
corr.sort_values('pvalue').tail()
min_correlation = 0.2
key_features = corr[np.abs(corr['correlation']) > min_correlation].sort_values(by='correlation', ascending=False).index.values
key_features, key_features.size
%store key_features
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The sale price is in the hundreds of thousands, so let's divide the price by 1000 to get more manageable numbers.
Step2: The distribution is skewed (as demonstrated by the large z-score (and small p-value) of the skewtest). It is right skewed (the skew is positive). Skewed distributions are not ideal for linear models, which often assume a normal distribution. One way to correct for right-skewness is to take the log [1,2,3] (a short sketch follows this step list).
Step3: Merge the training and test datasets for data preparation
Step4: Features
Step5: We've got 3 data types
Step6: Split the data between categorical and numerical features
Step7: Numerical features
Step8: We've got 36 numerical features. We can use the describe method to get some statistics
Step9: But that's a lot of numbers to digest. Better get started plotting! To help with plotting, but also to improve linear regression models, we're going to standardize our data. But before that we must deal with the NaN values.
Step10: Based on the description, the null values for the MasVnrArea should be 0 (no masonry veneer type)
Step11: For the GarageYrBlt, replace by the year the house was built.
Step12: Standardize the data
Step14: Plot violinplots for each feature
Step15: Many of the features are highly skewed with very long tails.
Step16: Most of these are right skewed as well. BsmtFullBath has some discrete values (number of bathrooms).
Step17: Some features, such as BsmtFinSF2, are almost constant (blobs with long tail) as can be seen below
Step18: Drop nearly constant features
Step19: Log transform the other features if they have a high skewness
Step20: Check the sign of the skewness for all these
Step22: Let's apply a log1p transform to all these and plot the distributions again
Step23: Now our originally skewed features look more symmetric.
Step24: Save transformed numerical data
Step25: Feature selection
Step26: Let's keep only the features that have a high enough correlation with the price (absolute correlation of at least 0.2)
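A minimal illustration of the log1p idea from Step2 and Step22, on synthetic right-skewed data rather than the housing features:
import numpy as np
import scipy.stats as st
rng = np.random.RandomState(0)
x = rng.lognormal(mean=0.0, sigma=1.0, size=5000)  # strongly right-skewed sample
print('skew before:', st.skew(x))            # large positive value
print('skew after :', st.skew(np.log1p(x)))  # much closer to zero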
|
6,861
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 2
sample_id = 6
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalized data
# TODO: Implement Function
return x / 256
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
def one_hot_encode(x):
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
# TODO: Implement Function
one_hot_vectors = np.zeros((len(x),10))
i = 0
for label in x:
one_hot_vectors[i][label] = 1
i = i+1
return one_hot_vectors
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
Return a Tensor for a bach of image input
: image_shape: Shape of the images
: return: Tensor for image input.
# TODO: Implement Function
return tf.placeholder(tf.float32, [None,image_shape[0],image_shape[1],image_shape[2]], "x")
def neural_net_label_input(n_classes):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
# TODO: Implement Function
return tf.placeholder(tf.float32, [None,n_classes], "y")
def neural_net_keep_prob_input():
Return a Tensor for keep probability
: return: Tensor for keep probability.
# TODO: Implement Function
return tf.placeholder(tf.float32, name="keep_prob")
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernel size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernel size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
# TODO: Implement Function
weight = tf.Variable(tf.truncated_normal([conv_ksize[0], conv_ksize[1], x_tensor.get_shape().as_list()[3], conv_num_outputs],0,5e-2))
bias = tf.Variable(tf.zeros(conv_num_outputs))
strides = [1, conv_strides[0], conv_strides[1], 1]
conv_layer = tf.nn.conv2d(x_tensor, weight, strides, padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
ksize = [1, pool_ksize[0], pool_ksize[1], 1]
kstrides = [1, pool_strides[0], pool_strides[1], 1]
return tf.nn.max_pool(conv_layer, ksize, kstrides, padding='SAME')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
# TODO: Implement Function
return tf.reshape(x_tensor,[-1,(x_tensor.get_shape().as_list()[1] * x_tensor.get_shape().as_list()[2] * x_tensor.get_shape().as_list()[3])])
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of outputs that the new tensor should have.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
weights = tf.Variable(tf.truncated_normal([x_tensor.shape.as_list()[1], num_outputs],0,5e-2))
bias = tf.Variable(tf.truncated_normal([num_outputs]))
fc = tf.add(tf.matmul(x_tensor, weights), bias)
return tf.nn.relu(fc)
# return tf.contrib.layers.fully_connected(x_tensor, num_outputs)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of outputs that the new tensor should have.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
weights = tf.Variable(tf.truncated_normal([x_tensor.shape.as_list()[1], num_outputs],0,5e-2))
bias = tf.Variable(tf.truncated_normal([num_outputs]))
return tf.add(tf.matmul(x_tensor, weights), bias)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x_tensor, keep_prob):
Create a convolutional neural network model
: x_tensor: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that holds the dropout keep probability.
: return: Tensor that represents logits
conv_num_outputs1 = 32
conv_num_outputs2 = 64
conv_num_outputs3 = 128
conv_ksize1 = (3,3)
conv_ksize2 = (4,4)
conv_ksize3 = (5,5)
conv_strides1 = (1,1)
conv_strides2 = (1,1)
conv_strides3 = (1,1)
pool_ksize = (2,2)
pool_strides = pool_ksize
fc1_num_outputs = 1024
fc2_num_outputs = 512
fc3_num_outputs = 256
num_outputs = 10
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
x_tensor = conv2d_maxpool(x_tensor, conv_num_outputs1, conv_ksize1, conv_strides1, pool_ksize, pool_strides)
# x_tensor = tf.nn.dropout(x_tensor, keep_prob)
x_tensor = conv2d_maxpool(x_tensor, conv_num_outputs2, conv_ksize2, conv_strides2, pool_ksize, pool_strides)
x_tensor = tf.nn.dropout(x_tensor, keep_prob)
x_tensor = conv2d_maxpool(x_tensor, conv_num_outputs3, conv_ksize3, conv_strides3, pool_ksize, pool_strides)
# x_tensor = tf.nn.dropout(x_tensor, keep_prob)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
x_tensor = flatten(x_tensor)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
x_tensor = fully_conn(x_tensor, fc1_num_outputs)
# x_tensor = tf.nn.dropout(x_tensor, keep_prob)
x_tensor = fully_conn(x_tensor, fc2_num_outputs)
# x_tensor = tf.nn.dropout(x_tensor, keep_prob)
x_tensor = fully_conn(x_tensor, fc3_num_outputs)
x_tensor = tf.nn.dropout(x_tensor, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
x_tensor = output(x_tensor, num_outputs)
# TODO: return output
return x_tensor
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that it can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
# TODO: Implement Function
session.run(optimizer, feed_dict={
x: feature_batch,
y: label_batch,
keep_prob: keep_probability})
pass
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function
loss = session.run(cost, feed_dict={
x: feature_batch,
y: label_batch,
keep_prob: 1.})
valid_acc = session.run(accuracy, feed_dict={
x: valid_features,
y: valid_labels,
keep_prob: 1.})
print('Loss: {:>10.4f} Validation Accuracy: {:.6f}'.format(loss, valid_acc))
pass
# TODO: Tune Parameters
epochs = 15
batch_size = 256
keep_probability = 0.75
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# sess.close
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
6,862
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
from phoebe import u # units
logger = phoebe.logger()
b = phoebe.default_binary()
phoebe.list_available_datasets()
b.add_dataset(phoebe.dataset.orb,
compute_times=phoebe.linspace(0,10,20),
dataset='orb01',
component=['primary', 'secondary'])
b.add_dataset('orb',
compute_times=phoebe.linspace(0,10,20),
component=['primary', 'secondary'],
dataset='orb01',
overwrite=True)
b.add_dataset('rv', times=phoebe.linspace(0,10,20), dataset='rv01')
print(b.filter(qualifier='times', dataset='rv01').components)
print(b.filter(qualifier='times', dataset='rv01', check_default=False).components)
print(b.get('times@_default@rv01', check_default=False))
b.add_dataset('lc', times=[0,1], fluxes=[1,0.5], dataset='lc01')
print(b.get_parameter(qualifier='fluxes', dataset='lc01', context='dataset'))
b.add_dataset('rv',
times=[0,1],
rvs=[-3,3],
component='primary',
dataset='rv01',
overwrite=True)
print(b.get_parameter(qualifier='rvs', dataset='rv01', context='dataset'))
b.add_dataset('rv',
times=[0,0.5,1],
rvs=[-3,3],
dataset='rv02')
print(b.filter(qualifier='rvs', dataset='rv02', context='dataset'))
b.add_dataset('rv',
times=[0,0.5,1],
rvs={'primary': [-3,3], 'secondary': [4,-4]},
dataset='rv02',
overwrite=True)
print(b.filter(qualifier='rvs', dataset='rv02', context='dataset'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Adding a Dataset from Arrays
Step2: Without Observations
Step3: Here we used phoebe.linspace. This is essentially just a shortcut to np.linspace, but using nparray to allow these generated arrays to be serialized and stored more easily within the Bundle. Other nparray constructor functions available at the top-level of PHOEBE include
Step4: You may notice that add_dataset does take some time to complete. In the background, the passband is being loaded (when applicable) and many parameters are created and attached to the Bundle.
Step5: Here we added an RV dataset and can see that it was automatically created for both stars in our system. Under the hood, another entry is created for component='_default'. The default parameters hold the values that will be replicated if a new component is added to the system in the future. In order to see these hidden parameters, you need to pass check_default=False to any filter-type call (and note that '_default' is no longer exposed when calling .components). Also note that for set_value_all, this is automatically set to False.
Step6: With Observations
Step7: For datasets which attach to individual components, however, this isn't always the desired behavior.
Step8: However, for a double-lined RV we probably don't want to do the following
Step9: Instead we want to pass different arrays to the 'rvs@primary' and 'rvs@secondary'. This can be done by explicitly stating the components in a dictionary sent to that argument
|
6,863
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import openpathsampling as paths
import numpy as np
# NBVAL_IGNORE_OUTPUT
import pyemma.coordinates as coor
# NBVAL_IGNORE_OUTPUT
ref_storage = paths.Storage('engine_store_test.nc', mode='r')
# NBVAL_IGNORE_OUTPUT
storage = paths.Storage('delete.nc', 'w')
storage.trajectories.save(ref_storage.trajectories[0])
# NBVAL_IGNORE_OUTPUT
def pyemma_generator(f):
f.add_inverse_distances(f.pairs(f.select_Backbone()))
# NBVAL_IGNORE_OUTPUT
cv = paths.collectivevariable.PyEMMAFeaturizerCV(
'pyemma',
pyemma_generator,
topology=ref_storage.snapshots[0].topology
).with_diskcache()
cv(ref_storage.trajectories[0]);
# NBVAL_IGNORE_OUTPUT
print(storage.save(cv))
# NBVAL_IGNORE_OUTPUT
cv(storage.trajectories[0]);
# NBVAL_IGNORE_OUTPUT
cv(storage.snapshots.all());
# NBVAL_IGNORE_OUTPUT
py_cv = storage.cvs['pyemma']
store = storage.stores['cv%d' % storage.idx(py_cv)]
nc_var = store.variables['value']
assert(nc_var.shape[1] == 15)
print(nc_var.shape[1])
assert(nc_var.var_type == 'numpy.float32')
print(nc_var.var_type)
# NBVAL_IGNORE_OUTPUT
print(storage.variables['attributes_json'][:])
# NBVAL_IGNORE_OUTPUT
py_cv_idx = storage.idx(py_cv)
print(py_cv_idx)
py_emma_feat = storage.vars['attributes_json'][py_cv_idx]
# NBVAL_IGNORE_OUTPUT
erg = py_emma_feat(storage.snapshots);
# NBVAL_IGNORE_OUTPUT
print(erg[:,2:4])
storage.close()
ref_storage.close()
# NBVAL_IGNORE_OUTPUT
storage = paths.Storage('delete.nc', 'r')
cv = storage.cvs[0]
assert np.allclose(erg, cv(storage.snapshots))
storage.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import a PyEmma Coordinates Module
Step2: Now use this featurizer generating function to build a collective variable out of it. All we need for that is a name as usual, the generating function, the list of parameters - here only the topology and at best a test snapshot, a template.
Step3: Let's save it to the storage
Step4: and apply the featurizer to a trajectory
Step5: Sync to make sure the cache is written to the netCDF file.
Step6: Make sure that we get the same result
|
6,864
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-mh', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
6,865
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import YouTubeVideo
YouTubeVideo('U_Q6Xjz9QHg', width=720, height=400, loop=1, autoplay=0, playlist='U_Q6Xjz9QHg')
from IPython.display import YouTubeVideo
YouTubeVideo('jHxyP-nUhUY', width=500, height=400, autoplay=0, start=60)
from IPython.display import HTML
HTML('<H2>A Neuron</H2><p><iframe src="https://rawgit.com/celiasmith/syde556/master/lecture1/neuron.html" width=825 height=475></iframe>')
from IPython.display import YouTubeVideo
YouTubeVideo('F37kuXObIBU', width=720, height=500, start=8*60+35)
from IPython.display import YouTubeVideo
YouTubeVideo('KE952yueVLA', width=640, height=390)
from IPython.display import YouTubeVideo
YouTubeVideo('lfNVv0A8QvI', width=640, height=390)
from IPython.display import YouTubeVideo
YouTubeVideo('DGBy-BGiZIM', width=640, height=360)
from IPython.display import YouTubeVideo
YouTubeVideo('CpejbZ-XEyM', width=640, height=360)
from IPython.display import YouTubeVideo
YouTubeVideo('v7uRFVR9BPU', width=640, height=390)
from IPython.display import YouTubeVideo
YouTubeVideo('_UFOSHZ22q4', width=600, height=400, start=60)
from IPython.display import YouTubeVideo
YouTubeVideo('WmChhExovzY', width=600, height=400)
from IPython.display import YouTubeVideo
YouTubeVideo('2j9rRHChtXk', width=640, height=390)
from IPython.display import YouTubeVideo
YouTubeVideo('VWUhCzUDZ70', width=640, height=390)
from IPython.display import YouTubeVideo
YouTubeVideo('sUvHCs5y0o8', width=640, height=360)
from IPython.display import YouTubeVideo
YouTubeVideo('f6Ul5TYK5-o', width=640, height=360)
from IPython.display import YouTubeVideo
YouTubeVideo('WNnMhF7rnYo', width=640, height=390)
from IPython.display import YouTubeVideo
YouTubeVideo('mP7DX6x9PX8', width=640, height=390)
from IPython.display import YouTubeVideo
YouTubeVideo('Q_LRvnwnYp8', width=640, height=390)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overall Goal
Step2: Brain structures
Step3: Neurons in the brain
Step4: Kinds of data from the brain
Step5: <img src="files/lecture1/catgratings.gif" width="400" style="float
Step6: Calcium Imaging
Step7: In a stalking fish
Step8: Optogenetics
Step9: What do we know so far?
Step10: Billions of neurons
Step11: The controversy
Step12: Vision
Step13: Problem solving
Step14: Spaun
Step15: Spaun
Step16: Spaun
Step17: Spaun
|
6,866
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-7, 3e-7,5e-7]
regularization_strengths = [5e4, 1e4]
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
num_iters = 1000
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifer in best_svm. You might also want to play #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
################################################################################
for learning_rate in learning_rates:
for regularization_strength in regularization_strengths:
print "learning_rage {:.2e}, regularization_strength {:.2e}".format(learning_rate, regularization_strength)
#train it
svm = LinearSVM()
svm.train(X_train_feats, y_train, learning_rate=learning_rate, reg=regularization_strength,
num_iters=num_iters, verbose=True)
#predict
y_train_pred = svm.predict(X_train_feats)
training_accuracy = np.mean(y_train == y_train_pred)
y_val_pred = svm.predict(X_val_feats)
validation_accuracy = np.mean(y_val == y_val_pred)
results[(learning_rate,regularization_strength)] = training_accuracy, validation_accuracy
print "train accurcy {}, validation {}".format(training_accuracy, validation_accuracy)
if validation_accuracy > best_val:
best_val = validation_accuracy
best_svm = svm
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print test_accuracy
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
print X_train_feats.shape
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
num_iters = 1800
batch_size=200
# hyperparameters
learning_rate = 5e-1
reg = 1e-6
learning_rate_decay = 0.95
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
net.train(X_train_feats, y_train, X_val_feats, y_val,
num_iters=num_iters,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_decay= learning_rate_decay,
reg=reg,
verbose=False)
# Predict on the validation set
val_acc = (net.predict(X_val_feats) == y_val).mean()
train_acc = (net.predict(X_train_feats) == y_train).mean()
print 'Train accuracy:{}, Validation accuracy:{}'.format(train_acc, val_acc)
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to #
# cross-validate various parameters as in previous sections. Store your best #
# model in the best_net variable. #
################################################################################
################################################################################
# END OF YOUR CODE #
################################################################################
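# One possible minimal completion (a sketch, not a tuned result): reuse the
# network trained in the cell above as the best model found so far. A fuller
# search would loop over learning_rate / reg / hidden_dim combinations, track
# each run's validation accuracy, and keep the best-performing network.
best_net = net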
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
test_acc = (net.predict(X_test_feats) == y_test).mean()
print test_acc
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Extract Features
Step3: Train SVM on features
Step4: Inline question 1
|
6,867
|
<ASSISTANT_TASK:>
Python Code:
print 'This is a string'
s = 'STRING'
print 'Place another string with a mod and s: %s' %(s)
print 'Floating point numbers: %1.2f' %(13.144)
print 'Floating point numbers: %1.0f' %(13.144)
print 'Floating point numbers: %1.5f' %(13.144)
print 'Floating point numbers: %10.2f' %(13.144)
print 'Floating point numbers: %25.2f' %(13.144)
print 'Here is a number: %s. Here is a string: %s' %(123.1,'hi')
print 'Here is a number: %r. Here is a string: %r' %(123.1,'hi')
print 'First: %s, Second: %1.2f, Third: %r' %('hi!',3.14,22)
print 'This is a string with an {p}'.format(p='insert')
# Multiple times:
print 'One: {p}, Two: {p}, Three: {p}'.format(p='Hi!')
# Several Objects:
print 'Object 1: {a}, Object 2: {b}, Object 3: {c}'.format(a=1,b='two',c=12.3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Strings
Step2: Floating Point Numbers
Step3: Conversion Format methods.
Step4: Multiple Formatting
Step5: Using the string .format() method
|
6,868
|
<ASSISTANT_TASK:>
Python Code:
import jax
import jax.numpy as jnp
import numpy as np
from matplotlib import pyplot as plt
# Check connected accelerators. Depending on what runtime you're connected to,
# this will show a single CPU/GPU, or 8 TPU cores (jf_2x2 aka JellyDonut).
# You can start a TPU runtime via : "Connect to a runtime" -> "Start" ->
# "Borg Runtime" -> "Brain Frameworks JellyDonut (go/ml-colab)"
# https://screenshot.googleplex.com/87HTCpQNhBKUZUp
# See also http://go/research-workflow-intro-deck#colab
jax.devices()
# Local devices: In this case it's the same as all devices, but if you run JAX
# in a multi-host setup, then local_devices will only show the devices connected
# to the host running the program.
jax.local_devices()
# Alternatively, you can also connect to GPU runtime.
!nvidia-smi
# YOUR ACTION REQUIRED:
# Your task is to use JAX to generate 5 uniform random numbers and 5 normally
# distributed random numbers.
# Check out the following JAX API calls:
# - jax.random.PRNGKey()
# - jax.random.split()
# - jax.random.uniform()
# - jax.random.normal()
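# One possible solution sketch (the seed value 0 is an arbitrary choice): JAX
# uses explicit PRNG keys, which we split into independent subkeys before sampling.
key = jax.random.PRNGKey(0)
key_uniform, key_normal = jax.random.split(key)
print(jax.random.uniform(key_uniform, (5,)))   # 5 uniform samples in [0, 1)
print(jax.random.normal(key_normal, (5,)))     # 5 standard-normal samples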
# Let's do some semi-serious matrix multiplication:
k = 3_000
x = np.random.normal(size=[k, k])
# ~3.4s
%time x @ x
# YOUR ACTION REQUIRED: Do the same computation using JAX!
# You should use result.block_until_ready() for a fair comparison.
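# A possible sketch: move the data to the accelerator once, then time the same
# matmul; block_until_ready() waits for the asynchronous result before timing stops.
x_jax = jnp.array(x)
%time (x_jax @ x_jax).block_until_ready()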
# Note the different class of the JAX array. There is additional API e.g. to
# determine on which device the data is stored, check out x.device_buffer
# Combining jnp & np : Below array initialization is rather slow because we
# create a lot of jnp array. Replace jnp with np and observe the speedup!
%%time
# GPU : 1.79s
# CPU : 1.04s
x = jnp.array([jnp.arange(100) for _ in range(10000)])
print(repr(x))
# YOUR ACTION REQUIRED:
# In this situation we would want to create the array in np and then convert it
# to a jnp array using jnp.array() or jax.device_put().
# (Note that we could use np.tile() here, but that's not the point)
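# Sketch of the faster variant: build the array with plain NumPy, then move it
# to the device in a single transfer.
x_np = np.array([np.arange(100) for _ in range(10000)])
x = jax.device_put(x_np)   # jnp.array(x_np) would work as well
print(x.shape)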
def sigmoid(x):
return 0.5 * (1 + jnp.tanh(x))
# YOUR ACTION REQUIRED:
# Use grad() to create a new function that computes the gradient of `sigmoid`.
# Verify the output of the new function at some points.
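# Sketch: jax.grad() transforms sigmoid into a new function returning d(sigmoid)/dx.
d_sigmoid = jax.grad(sigmoid)
# For 0.5 * (1 + tanh(x)) the derivative is 0.5 * (1 - tanh(x)**2), i.e. 0.5 at x=0.
print(d_sigmoid(0.0), d_sigmoid(2.0))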
def f(x, y):
return 2 * x * y**2
# YOUR ACTION REQUIRED:
# Compute df/dx and df/dy with grad()
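# Sketch: argnums selects which positional argument to differentiate with respect to.
dfdx = jax.grad(f, argnums=0)   # df/dx = 2 * y**2
dfdy = jax.grad(f, argnums=1)   # df/dy = 4 * x * y
print(dfdx(3.0, 2.0), dfdy(3.0, 2.0))   # expect 8.0 and 24.0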
# Now let's plot the gradient of the sigmoid function in the range [-5, 5]
xs = jnp.linspace(-5, 5, 100)
# We can of course evaluate the gradient at every position separately:
grads = [jax.grad(sigmoid)(x) for x in xs]
plt.plot(xs, grads);
# But JAX can "vectorize" our gradient function for us automatically.
# YOUR ACTION REQUIRED:
# Read the documentation about `vmap` and reimplement the plot without a Python
# loop.
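# Sketch: vmap() maps the scalar gradient function over the whole array in one call.
plt.plot(xs, jax.vmap(jax.grad(sigmoid))(xs));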
# Another vmap() example : Let's re-implement matmul using vector dot product:
vdp = lambda v1, v2: v1.dot(v2)
# Vector dot product:
vdp(jnp.arange(1, 4), jnp.arange(1, 4))
# Matrix vector product:
mvp = jax.vmap(vdp, in_axes=(0, None), out_axes=0)
# Matrix matrix product:
mmp = jax.vmap(mvp, in_axes=(None, 1), out_axes=1)
# Verify result.
m1 = jnp.arange(12).reshape((3, 4))
m2 = m1.reshape((4, 3))
# In case you were wondering : Since Python 3.5 we have `.__matmul__()` operator
# that happens to use the same character as for decorators (cf. `@jit` below).
mmp(m1, m2) - m1 @ m2
# YOUR ACTION REQUIRED:
# It's curry time!
# Try re-implementing mvp(), but this time without using in_axes= and
# out_axes=. Instead use lambda expressions to (un)curry the arguments in such
# a way that vmap()'s default in_axes=0 and out_axes=0 does the job.
# (You can also re-implement mmp() this way, but it involves transposing).
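# One possible (un)curried sketch: fix the vector argument with a lambda so that
# vmap's default in_axes=0 / out_axes=0 maps over the matrix rows only.
mvp_curried = lambda m, v: jax.vmap(lambda row: vdp(row, v))(m)
print(mvp_curried(m1, jnp.arange(4)) - m1 @ jnp.arange(4))   # should be all zeros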
# JAX would not have the final X in it's name if it were not for XLA, the
# magic sauce that somehow takes computation defined in a function as input
# and produces a much faster version of it.
# @jax.jit
def f(x):
y = x
for _ in range(10):
y = y - 0.1 * y + 3.0
return y[:100, :100]
x = jax.random.normal(jax.random.PRNGKey(0), (5000, 5000))
%timeit f(x).block_until_ready()
# YOUR ACTION REQUIRED:
# Move your magic JAX wand and cast a spell by removing a single character from
# above example, drastically speeding up the computation!
# Note: JIT unrolls the for loop and converts all computations to XLA
# primitives. XLA is then smart enough to fuse kernels for multiplication and
# addition, and optimize the program to only compute those parts that are
# actually needed for the function result...
# Just to be clear : `@jit` is Python's decorator syntax [1], you can also use
# jit() like the other function transformations.
# [1] https://www.python.org/dev/peps/pep-0318
@jax.jit
def f1_jit(x):
return x**0.5
def f2(x):
return x**0.5
# It's really the same.
f2_jit = jax.jit(f2)
f1_jit(2) - f2_jit(2)
# What you need to understand about JIT (1/3): When a function is traced.
@jax.jit
def noop(x):
# This statement only gets executed when the function is traced, i.e. every
# time you execute the JIT-ted version with a new ShapedArray (different dtype
# and/or different shape).
print("Tracing noop:", x)
return x
noop(jnp.arange(3)) # Tracing.
noop(jnp.arange(3) + 1) # Using trace from cache.
noop(jnp.arange(4)) # Tracing.
noop(jnp.arange(4.0)) # Tracing.
noop(jnp.arange(1.0, 5.0)) # Using trace from cache.
# What you need to understand about JIT (2/3): Baking in environment.
magic_number = 13
@jax.jit
def add_magic(x):
return x + magic_number
print(add_magic(np.array([0])))
magic_number = 42
print(add_magic(np.array([0])))
print(add_magic(np.array([0.0])))
# What you need to understand about JIT (3/3): Value-dependent flow.
def mult(x, n):
print("Tracing mult:", x, n)
tot = 0
while n > 0:
tot += x
n -= 1
return tot
# The problem:
# The following statement fails, because : JIT will generate the function's XLA
# code by tracing it with `ShapedArray`'s. These arrays have only their shape
# and datatype defined. Hence, if there are any statements involving the actual
# *values* of the parameters, JIT does not know what to do and raises an
# exception.
# (Note that if mult were traced with `ConcreteArray`s then the trace would work
# just fine; you can see that when executing `grad(mult)(3., 2.)`)
try:
jax.jit(mult)(3, 2)
except Exception as e:
print(f"\n### FAILED WITH : {e}")
# How can we fix this ??
# Solution 1 : static_argnums
jax.jit(mult, static_argnums=1)(3, 4)
jax.jit(mult, static_argnums=1)(3, 5)
jax.jit(mult, static_argnums=1)(3, 6)
# By the way : did you notice how the function is traced exactly three times the
# first time this cell is executed, but not when you re-execute the same cell?
# That's because JIT-ted functions are cached. If you want to observe the
# tracing a second time, you first need to execute above cell so that `mult`
# gets redefined and the cache needs to be updated with the new definition.
# Solution 2 : (un)currying
# YOUR ACTION REQUIRED:
# Use jit() without `static_argnums=`, but (un)curry the function mult instead.
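# A possible sketch (illustrative, not the only answer): close over `n` as a
# plain Python value so only `x` is traced; the while loop then unrolls at
# trace time, and a separate trace is cached per value of n.
def jit_mult_for(n):
    return jax.jit(lambda x: mult(x, n))

jit_mult_for(4)(3)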
# Solution 3 : Use XLA primitives for control flow.
# Remember: You can inspect `jax.lax.while_loop()` docs by either:
# - Go to https://jax.readthedocs.io
# - Execute a cell containing `?jax.lax.while_loop`
# - Hover your mouse over `while_loop` and wait two seconds
def mult_(x, n):
print("Tracing mult_:", x, n)
def cond_fun(n_tot):
n, tot = n_tot
return n > 0
def body_fun(n_tot):
n, tot = n_tot
return (n - 1, tot + x)
return jax.lax.while_loop(cond_fun, body_fun, (n, 0))
jax.jit(mult_)(3, 4)
jax.jit(mult_)(3, 5)
jax.jit(mult_)(3, 6)
# Woah! Wasn't JAX supposed to be fast !? What is going on here ??
# Also note that increasing the second number significantly will crash
# your runtime...
%%time
jax.jit(mult, static_argnums=1)(3, 5000)
# Does this function have the same problems? Why not?
%%time
jax.jit(mult_)(3, 5000)
# Parallel computing is more fun with multiple devices :-)
# Go back to "Initialization" and connect to a different runtime if you're
# running on a single device.
assert jax.device_count() == 8, "Please connect to a JellyDonut runtime!"
# By default in_axes=0, so pmap() will split every incoming tensor across its
# first axis - which should be sized jax.local_device_count().
# The computations are then performed in parallel and the results are returned
# as a sharded device array. The data remains on the individual accelerators.
# Note that pmap() also XLA-compiles the function, so no need to call jit().
# Generate 8 different random seeds.
keys = jax.random.split(jax.random.PRNGKey(0), 8)
# Generate 8 different random matrices. Data remains on devices.
mats = jax.pmap(lambda key: jax.random.normal(key, (8_000, 8_000)))(keys)
# Perform 8 matmuls in parallel.
results = jax.pmap(lambda m1, m2: m1 @ m2)(mats, mats)
# YOUR ACTION REQUIRED:
# Fetch the mean of these matrices from every device and print it out here.
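# A possible sketch: compute one mean per device in parallel; the result is a
# small (8,) array that is cheap to pull back to the host for printing.
means = jax.pmap(jnp.mean)(mats)
print(means)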
import functools
# Here we use jax.lax.psum() to do computations across devices. Note that these
# operations can incur a lot of communication cost. Below we split our 8
# devices along two axes (4x2).
# Note in particular that parallel operators work across hosts! We can't
# demonstrate this in a Colab, but you will encounter it later in the Flax
# examples and brain templates.
# You can read more about parallel operators here:
# https://jax.readthedocs.io/en/latest/jax.lax.html#parallel-operators
# axis 0 : rows
@functools.partial(jax.pmap, axis_name="rows")
# axis 1 : columns
@functools.partial(jax.pmap, axis_name="cols")
def f(x):
# across the rows (= column sum)
row_sum = jax.lax.psum(x, "rows")
# across the cols (= row sum)
col_sum = jax.lax.psum(x, "cols")
total_sum = jax.lax.psum(x, ("rows", "cols"))
return row_sum, col_sum, total_sum
# YOUR ACTION REQUIRED:
# Create an array, feed it to f() and verify the correctness of the results
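# A possible sketch (assumes the 8-device 4x2 mesh implied by the nested pmap):
x = jnp.arange(8.0).reshape((4, 2))
row_sum, col_sum, total_sum = f(x)
print(row_sum)    # every entry in a column holds that column's sum (12., 16.)
print(col_sum)    # every entry in a row holds that row's sum (1., 5., 9., 13.)
print(total_sum)  # every entry holds x.sum() == 28.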
# Whenever we encounter a function argument, e.g. the `params` for a model, or
# the first argument to `grad()` to whose respect we perform automatic
# differentiation, it can really be a "pytree" of `jnp.ndarray`. A pytree
# consists of an arbitrary combination of Python dict/list/tuple and allows us
# to structure our data hierarchically.
# This is a pytree:
data = dict(
array_3x2=jnp.arange(6.0).reshape((3, 2)),
mixed_tuple=(0.1, 0.2, 0.3, [1.0, 2.0, 3.0]),
subdict=dict(
array_3x4=jnp.arange(12.0).reshape((3, 4)),
array_4x3=jnp.arange(12.0).reshape((4, 3)),
),
)
# Call a function over all values, output resulting tree:
jax.tree_map(jnp.shape, data)
# Define a function that does some computation with the values:
def sumsquares(x):
value_flat, value_tree = jax.tree_flatten(x)
del value_tree # not needed.
tot = 0
for value in value_flat:
if isinstance(value, jnp.ndarray):
value = value.sum()
tot += value**2
return tot
sumsquares(data)
# Compute gradients. Remember that grad() computes gradients wrt the first
# argument, but that first argument can be an arbitrarily complex pytree (like
# all the weights in your hierarchical model).
grads = jax.grad(sumsquares)(data)
grads
# YOUR ACTION REQUIRED:
# Take a step against the gradients using `jax.tree_multimap()`
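# A possible sketch of a plain SGD step over the pytree (the step size is an
# illustrative choice, not part of the original exercise):
step_size = 0.01
updated_data = jax.tree_multimap(lambda p, g: p - step_size * g, data, grads)
jax.tree_map(jnp.shape, updated_data)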
# Our one stop shop for datasets. If you use dataset preprocessing, then those
# computations will be performed with a Tensorflow graph. But we don't really
# need to understand the details, but rather use the API to stream through the
# dataset and then use JAX for computations.
import tensorflow_datasets as tfds
# Don't like fashion? Go checkout the other image classification datasets:
# https://www.tensorflow.org/datasets/catalog/overview#image_classification
# (actually, go and check them out, even if you like fashion...)
ds, ds_info = tfds.load("fashion_mnist", with_info=True)
tfds.show_examples(ds["train"], ds_info, rows=4, cols=6);
# We're not really interested in tf.data preprocessing here, so let's just fetch
# all the data as a jax.ndarray...
def ds_get_all(ds, *keys):
Returns jnp.array() for specified `keys` from entire dataset `ds`.
d = next(iter(ds.batch(ds.cardinality())))
return tuple(jnp.array(d[key]._numpy()) for key in keys)
train_images, train_labels = ds_get_all(ds["train"], "image", "label")
train_images /= 255.0
test_images, test_labels = ds_get_all(ds["test"], "image", "label")
test_images /= 255.0
train_images.shape, train_labels.shape # labels as indices, not one-hot
# YOUR ACTION REQUIRED:
# Implement the body of this function.
def linear_init(key, input_shape, n_classes):
Initializes parameters for a linear classifier.
Args:
key: a PRNGKey used as the random key.
input_shape: Shape of a single input example.
n_classes: Number of output classes.
Returns:
A pytree to be used as a first argument with `linear_apply()`.
pass
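# A possible reference sketch, kept under a different name so the exercise
# above stays open. It assumes a single dense layer over the flattened image
# (weight matrix plus bias); the 0.01 scale is an illustrative choice.
def linear_init_sketch(key, input_shape, n_classes):
    num_features = int(np.prod(input_shape))
    return dict(
        w=0.01 * jax.random.normal(key, (num_features, n_classes)),
        b=jnp.zeros(n_classes),
    )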
# YOUR ACTION REQUIRED:
# Implement the body of this function.
def linear_apply(params, inp):
Computes logits for a SINGLE EXAMPLE.
Args:
params: A pytree as returned by `linear_init()`.
inp: A single input example.
Returns:
Logits (i.e. values that should be normalized by `jax.nn.softmax()` to get a
valid probability distribution over the output classes).
pass
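# A matching reference sketch for the parameter layout used in
# linear_init_sketch() above (illustrative only):
def linear_apply_sketch(params, inp):
    return inp.reshape(-1) @ params["w"] + params["b"]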
# Initialize classifier & run on a single example.
params = linear_init(
key=jax.random.PRNGKey(0),
input_shape=train_images[0].shape,
n_classes=ds_info.features["label"].num_classes,
)
print(jax.tree_map(jnp.shape, params))
linear_apply(params, train_images[0])
def loss_fun(params, inputs, targets):
Compute x-entropy loss for a batch of images.
Args:
params: a pytree as returned by `linear_init()`.
inputs: batch of images
targets: batch of target labels (indices)
Returns:
The loss value.
# Note that we defined linear_apply() for a single example and how we use
# `vmap()` here to vectorize the function.
logits = jax.vmap(linear_apply, in_axes=(None, 0))(params, inputs)
# We go from logits directly to log(probs):
logprobs = logits - jax.scipy.special.logsumexp(
logits, axis=-1, keepdims=True
)
# Note: targets are indices.
return -logprobs[jnp.arange(len(targets)), targets].mean()
loss_fun(params, train_images[:2], train_labels[:2])
# This is a good moment to compile our computations using `jit()` !
# REMEMBER: Since we "bake in" all globals when `jit()` is called, you will need
# to re-execute this cell every time you change some code `update_step()`
# depends on (like e.g. `loss_fun()`, or `linear_apply()`).
@jax.jit
def update_step(params, inputs, targets):
Take a single optimization step.
Args:
params: A pytree as returned by `linear_init()`.
inputs: batch of images
targets: batch of target labels (indices)
Returns:
A tuple (updated_params, loss).
loss, grads = jax.value_and_grad(loss_fun)(params, inputs, targets)
# Optimize using SGD
updated_params = jax.tree_multimap(
lambda param, grad: param - 0.05 * grad, params, grads
)
return updated_params, loss
update_step(params, train_images[:2], train_labels[:2])
# Step 4 : Do the training by calling `update_step()` repeatedly.
def train(params, steps, batch_size=128):
losses = []
steps_per_epoch = len(train_images) // batch_size
for step in range(steps):
i0 = (step % steps_per_epoch) * batch_size
# Training is simply done by calling `update_step()` repeatedly and
# replacing `params` with `updated_params` returned by `update_step()`.
params, loss = update_step(
params,
train_images[i0 : i0 + batch_size],
train_labels[i0 : i0 + batch_size],
)
losses.append(float(loss))
return params, jnp.array(losses)
learnt_params, losses = train(params, steps=1_000)
plt.plot(losses)
print("final loss:", np.mean(losses[-100]))
# Compute accuracy of linear model.
def accuracy(params, inputs, targets):
logits = jax.vmap(linear_apply, in_axes=(None, 0))(params, inputs)
return (targets == logits.argmax(axis=-1)).mean()
accuracy(learnt_params, test_images, test_labels)
# from typing import Callable, Sequence # used ?
import flax
from flax import linen as nn
# Simple module with matmul layer. Note that we could build this in many
# different ways using the `scope` for parameter handling.
class Matmul:
def __init__(self, features):
self.features = features
def kernel_init(self, key, shape):
return jax.random.normal(key, shape)
def __call__(self, scope, x):
kernel = scope.param(
"kernel", self.kernel_init, (x.shape[1], self.features)
)
return x @ kernel
class Model:
def __init__(self, features):
self.matmuls = [Matmul(f) for f in features]
def __call__(self, scope, x):
x = x.reshape([len(x), -1])
for i, matmul in enumerate(self.matmuls):
x = scope.child(matmul, f"matmul_{i + 1}")(x)
if i < len(self.matmuls) - 1:
x = jax.nn.relu(x)
x = jax.nn.log_softmax(x)
return x
model = Model([ds_info.features["label"].num_classes])
y, variables = flax.core.init(model)(jax.random.PRNGKey(0), train_images[:1])
assert (y == flax.core.apply(model)(variables, train_images[:1])).all()
# YOUR ACTION REQUIRED:
# Check out the parameter structure, try adding/removing "layers" and see how it
# changes
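# A possible sketch, mirroring the flax.core.init() call above: inspect the
# parameter tree, then initialize a deeper (hypothetical) two-layer variant.
print(jax.tree_map(jnp.shape, variables))
deeper_model = Model([128, ds_info.features["label"].num_classes])
_, deeper_variables = flax.core.init(deeper_model)(jax.random.PRNGKey(0), train_images[:1])
jax.tree_map(jnp.shape, deeper_variables)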
# YOUR ACTION REQUIRED:
# Redefine loss_fun(), update_step(), and train() from above to train the new
# model.
# Reimplementation of above model using the Linen API.
class Model(nn.Module):
num_classes: int
def setup(self):
self.dense = nn.Dense(self.num_classes)
def __call__(self, x):
x = x.reshape([len(x), -1])
x = self.dense(x)
x = nn.log_softmax(x)
return x
model = Model(num_classes=ds_info.features["label"].num_classes)
variables = model.init(jax.random.PRNGKey(0), train_images[:1])
jax.tree_map(jnp.shape, variables)
# YOUR ACTION REQUIRED:
# 1. Rewrite above model using the @nn.compact notation.
# 2. Extend the model to use additional layers, see e.g.
# convolutions in
# http://google3/third_party/py/flax/linen/linear.py
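# A possible @nn.compact sketch of the same model (illustrative only; the
# class name is arbitrary and not part of the original notebook):
class CompactModel(nn.Module):
    num_classes: int

    @nn.compact
    def __call__(self, x):
        x = x.reshape([len(x), -1])
        x = nn.Dense(self.num_classes)(x)
        return nn.log_softmax(x)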
model = Model(ds_info.features["label"].num_classes)
variables = model.init(jax.random.PRNGKey(0), train_images[:1])
jax.tree_map(jnp.shape, variables)
# Reimplementation of training loop using a Flax optimizer.
@jax.jit
def update_step_optim(optim, inputs, targets):
def loss_fun(params):
logits = model.apply(dict(params=params), inputs)
logprobs = logits - jax.scipy.special.logsumexp(
logits, axis=-1, keepdims=True
)
return -logprobs[jnp.arange(len(targets)), targets].mean()
loss, grads = jax.value_and_grad(loss_fun)(optim.target)
return optim.apply_gradient(grads), loss
def train_optim(optim, steps, batch_size=128):
losses = []
steps_per_epoch = len(train_images) // batch_size
for step in range(steps):
i0 = (step % steps_per_epoch) * batch_size
optim, loss = update_step_optim(
optim,
train_images[i0 : i0 + batch_size],
train_labels[i0 : i0 + batch_size],
)
losses.append(float(loss))
return optim, jnp.array(losses)
optim = flax.optim.adam.Adam(learning_rate=0.01).create(variables["params"])
learnt_optim, losses = train_optim(optim, steps=1_000)
plt.plot(losses)
print("final loss:", np.mean(losses[-100]))
# Re-evaluate accuracy.
(
model.apply(dict(params=learnt_optim.target), test_images).argmax(axis=-1)
== test_labels
).mean()
# Let's add batch norm!
# I'm not saying it's a good idea here, but it will allow us study the changes
# we need to make for models that have state.
class Model(nn.Module):
num_classes: int
@nn.compact
def __call__(self, x, *, train):
x = x.reshape([len(x), -1])
x = nn.BatchNorm(use_running_average=not train)(x)
x = nn.Dense(self.num_classes)(x)
x = nn.log_softmax(x)
return x
model = Model(num_classes=ds_info.features["label"].num_classes)
variables = model.init(jax.random.PRNGKey(0), train_images[:1], train=True)
jax.tree_map(jnp.shape, variables)
# Note the new "batch_stats" collection !
# YOUR ACTION REQUIRED:
# Check below code and add comments for every change compared to the model above
# without state.
@jax.jit
def update_step_optim(optim, batch_stats, inputs, targets):
def loss_fun(params):
logits, mutated_state = model.apply(
dict(params=params, batch_stats=batch_stats),
inputs,
mutable="batch_stats",
train=True,
)
logprobs = logits - jax.scipy.special.logsumexp(
logits, axis=-1, keepdims=True
)
return (
-logprobs[jnp.arange(len(targets)), targets].mean(),
mutated_state["batch_stats"],
)
(loss, state), grads = jax.value_and_grad(loss_fun, has_aux=True)(
optim.target
)
return optim.apply_gradient(grads), state, loss
def train_optim(optim, batch_stats, steps, batch_size=128):
losses = []
steps_per_epoch = len(train_images) // batch_size
for step in range(steps):
i0 = (step % steps_per_epoch) * batch_size
optim, batch_stats, loss = update_step_optim(
optim,
batch_stats,
train_images[i0 : i0 + batch_size],
train_labels[i0 : i0 + batch_size],
)
losses.append(float(loss))
return optim, batch_stats, jnp.array(losses)
optim = flax.optim.adam.Adam(learning_rate=0.01).create(variables["params"])
learnt_optim, batch_stats, losses = train_optim(
optim, variables["batch_stats"], steps=1_000
)
plt.plot(losses)
print("final loss:", np.mean(losses[-100]))
# YOUR ACTION REQUIRED:
# Make predictions with above model with state
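# A possible sketch: evaluate with the learnt running statistics and
# train=False, so no collection needs to be mutable at prediction time.
preds = model.apply(
    dict(params=learnt_optim.target, batch_stats=batch_stats),
    test_images,
    train=False,
).argmax(axis=-1)
print((preds == test_labels).mean())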
# YOUR ACTION REQUIRED:
# Store the Colab in your personal drive and modify it to use the dataset from
# above.
# While this might sound boring, you will learn the following things:
# - how to load files in public Colab from Github, modify them in the UI and
# optionally store them on your personal Google Drive.
# - how to use inline TensorBoard on public Colab and export it to tensorboard.dev
# YOUR ACTION REQUIRED:
# 1. Fork the MNIST example.
# 2. Launch on Xmanager.
# 3. Check out the Colab.
# 4. Replace the dataset with the fashion mnist dataset from above.
# 5. Re-run all tests to and fix if necessary.
# 6. Launch modified version on Xmanager.
# 7. Run Colab again with your updated code.
# YOUR ACTION REQUIRED:
# Check out the code of the MNIST and imagenet examples.
# What differences do you see?
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Randomness
Step2: jnp vs. np
Step3: grad()
Step4: vmap()
Step5: jit()
Step6: pmap()
Step7: pytrees
Step9: JAX Linear Classifier
Step12: Step 1
Step14: Step 2
Step16: Step 3
Step17: Step 4
Step18: Flax
Step19: Functional core
Step20: Stateless Linen module
Step21: Linen module with state
Step22: Modify MNIST example
Step23: Brain templates
|
6,869
|
<ASSISTANT_TASK:>
Python Code:
import numpy
import keras
from keras import backend as K
from keras import losses, optimizers, regularizers
from keras.datasets import mnist
from keras.layers import Activation, ActivityRegularization, Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.utils.np_utils import to_categorical
model = Sequential()
model.add(Dense(64, input_shape=(784,), activation='relu'))
model.add(Dense(10, activation='softmax'))
model = Sequential()
model.add(Dense(64, input_shape=(784,)))
model.add(Activation('tanh'))
model.add(Dense(10))
model.add(Activation('softmax'))
model = Sequential()
model.add(Dense(64, input_shape=(784,),
activation=K.sigmoid))
model.add(Dense(10, activation='softmax'))
model = Sequential()
model.add(Dense(64, input_shape=(784,),
activation='relu',
kernel_regularizer=regularizers.l2(0.01),
activity_regularizer=regularizers.l1(0.01)))
model.add(Dense(10, activation='softmax'))
model = Sequential()
model.add(Dense(64, input_shape=(784,), activation='relu'))
model.add(ActivityRegularization(l1=0.01, l2=0.1))
model.add(Dense(10, activation='softmax'))
model = Sequential()
model.add(Dense(64, input_shape=(784,), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model = Sequential()
# input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
# this applies 32 convolution filters of size 3x3 each.
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model = Sequential()
model.add(Dense(64, input_shape=(784,), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Simple 1 layer denoising autoencoder
model = Sequential()
model.add(Dense(200, input_shape=(784,), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(784))
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss=losses.mean_squared_error, optimizer=sgd)
batch_size = 128
num_classes = 10
epochs = 10
TRAIN_EXAMPLES = 20000
TEST_EXAMPLES = 5000
# image dimensions
img_rows, img_cols = 28, 28
# load the data (already shuffled and splitted)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# reshape the data to add the "channels" dimension
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
# normalize the input in the range [0, 1]
# to make quick runs, select a smaller set of images.
train_mask = numpy.random.choice(x_train.shape[0], TRAIN_EXAMPLES, replace=False)
x_train = x_train[train_mask, :].astype('float32')
y_train = y_train[train_mask]
test_mask = numpy.random.choice(x_test.shape[0], TEST_EXAMPLES, replace=False)
x_test = x_test[test_mask, :].astype('float32')
y_test = y_test[test_mask]
x_train /= 255
x_test /= 255
print('Train samples: %d' % x_train.shape[0])
print('Test samples: %d' % x_test.shape[0])
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
# define the network architecture
model = Sequential()
model.add(Conv2D(filters=16,
kernel_size=(3, 3),
strides=(1,1),
padding='valid',
activation='relu',
input_shape=input_shape,
activity_regularizer='l2'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# compile the model
model.compile(loss=losses.categorical_crossentropy,
optimizer=optimizers.RMSprop(),
metrics=['accuracy'])
# train the model
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
# evaluate the model
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss: %.2f' % score[0])
print('Test accuracy: %.2f' % (100. * score[1]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Activation Functions
Step2: Activation as a layer
Step3: Activation from a TensorFlow function
Step4: Regularizers
Step5: Regularization as a layer
Step6: Dropout
Step7: Convolutional Neural Networks
Step8: Compiling the model
Step9: Loss function/optimizer as an instance
Step10: Categorical format
|
6,870
|
<ASSISTANT_TASK:>
Python Code:
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_raw, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_inv = (
data_path / 'MEG' / 'sample' / 'sample_audvis-meg-oct-6-meg-inv.fif')
fname_raw = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif'
label_name = 'Aud-lh'
fname_label = data_path / 'MEG' / 'sample' / 'labels' / f'{label_name}.label'
snr = 1.0 # use smaller SNR for raw data
lambda2 = 1.0 / snr ** 2
method = "sLORETA" # use sLORETA method (could also be MNE or dSPM)
# Load data
raw = mne.io.read_raw_fif(fname_raw)
inverse_operator = read_inverse_operator(fname_inv)
label = mne.read_label(fname_label)
raw.set_eeg_reference('average', projection=True) # set average reference.
start, stop = raw.time_as_index([0, 15]) # read the first 15s of data
# Compute inverse solution
stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
start, stop, pick_ori=None)
# Save result in stc files
stc.save('mne_%s_raw_inverse_%s' % (method, label_name), overwrite=True)
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: View activation time-series
|
6,871
|
<ASSISTANT_TASK:>
Python Code:
import gensim
import gensim.downloader as api
from gensim.utils import save_as_line_sentence
from gensim.models.word2vec import Word2Vec
print(gensim.models.word2vec.CORPUSFILE_VERSION) # must be >= 0, i.e. optimized compiled version
corpus = api.load("text8")
save_as_line_sentence(corpus, "my_corpus.txt")
model = Word2Vec(corpus_file="my_corpus.txt", iter=5, size=300, workers=14)
CORPUS_FILE = 'wiki-en-20171001.txt'
import itertools
from gensim.parsing.preprocessing import preprocess_string
def processed_corpus():
raw_corpus = api.load('wiki-english-20171001')
for article in raw_corpus:
# concatenate all section titles and texts of each Wikipedia article into a single "sentence"
doc = '\n'.join(itertools.chain.from_iterable(zip(article['section_titles'], article['section_texts'])))
yield preprocess_string(doc)
# serialize the preprocessed corpus into a single file on disk, using memory-efficient streaming
save_as_line_sentence(processed_corpus(), CORPUS_FILE)
from gensim.models.word2vec import LineSentence
import time
start_time = time.time()
model_sent = Word2Vec(sentences=LineSentence(CORPUS_FILE), iter=5, size=300, workers=32)
sent_time = time.time() - start_time
start_time = time.time()
model_corp_file = Word2Vec(corpus_file=CORPUS_FILE, iter=5, size=300, workers=32)
file_time = time.time() - start_time
print("Training model with `sentences` took {:.3f} seconds".format(sent_time))
print("Training model with `corpus_file` took {:.3f} seconds".format(file_time))
from gensim.test.utils import datapath
model_sent_accuracy = model_sent.wv.evaluate_word_analogies(datapath('questions-words.txt'))[0]
print("Word analogy accuracy with `sentences`: {:.1f}%".format(100.0 * model_sent_accuracy))
model_corp_file_accuracy = model_corp_file.wv.evaluate_word_analogies(datapath('questions-words.txt'))[0]
print("Word analogy accuracy with `corpus_file`: {:.1f}%".format(100.0 * model_corp_file_accuracy))
import gensim.downloader as api
from gensim.utils import save_as_line_sentence
from gensim.models.fasttext import FastText
corpus = api.load("text8")
save_as_line_sentence(corpus, "my_corpus.txt")
model = FastText(corpus_file="my_corpus.txt", iter=5, size=300, workers=14)
from gensim.models.word2vec import LineSentence
from gensim.models.fasttext import FastText
import time
start_time = time.time()
model_corp_file = FastText(corpus_file=CORPUS_FILE, iter=5, size=300, workers=32)
file_time = time.time() - start_time
start_time = time.time()
model_sent = FastText(sentences=LineSentence(CORPUS_FILE), iter=5, size=300, workers=32)
sent_time = time.time() - start_time
print("Training model with `sentences` took {:.3f} seconds".format(sent_time))
print("Training model with `corpus_file` took {:.3f} seconds".format(file_time))
from gensim.test.utils import datapath
model_sent_accuracy = model_sent.wv.evaluate_word_analogies(datapath('questions-words.txt'))[0]
print("Word analogy accuracy with `sentences`: {:.1f}%".format(100.0 * model_sent_accuracy))
model_corp_file_accuracy = model_corp_file.wv.evaluate_word_analogies(datapath('questions-words.txt'))[0]
print("Word analogy accuracy with `corpus_file`: {:.1f}%".format(100.0 * model_corp_file_accuracy))
import gensim.downloader as api
from gensim.utils import save_as_line_sentence
from gensim.models.doc2vec import Doc2Vec
corpus = api.load("text8")
save_as_line_sentence(corpus, "my_corpus.txt")
model = Doc2Vec(corpus_file="my_corpus.txt", epochs=5, vector_size=300, workers=14)
from gensim.models.doc2vec import Doc2Vec, TaggedLineDocument
import time
start_time = time.time()
model_corp_file = Doc2Vec(corpus_file=CORPUS_FILE, epochs=5, vector_size=300, workers=32)
file_time = time.time() - start_time
start_time = time.time()
model_sent = Doc2Vec(documents=TaggedLineDocument(CORPUS_FILE), epochs=5, vector_size=300, workers=32)
sent_time = time.time() - start_time
print("Training model with `sentences` took {:.3f} seconds".format(sent_time))
print("Training model with `corpus_file` took {:.3f} seconds".format(file_time))
from gensim.test.utils import datapath
model_sent_accuracy = model_sent.wv.evaluate_word_analogies(datapath('questions-words.txt'))[0]
print("Word analogy accuracy with `sentences`: {:.1f}%".format(100.0 * model_sent_accuracy))
model_corp_file_accuracy = model_corp_file.wv.evaluate_word_analogies(datapath('questions-words.txt'))[0]
print("Word analogy accuracy with `corpus_file`: {:.1f}%".format(100.0 * model_corp_file_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's prepare the full Wikipedia dataset as training corpus
Step2: Word2Vec
Step3: Training with corpus_file took 3.7x less time!
Step4: The accuracies are approximately the same.
Step5: Let's compare the timings
Step6: We see a 1.67x performance boost!
Step7: Doc2Vec
Step8: Let's compare the timings
Step9: A 6.6x speedup!
|
6,872
|
<ASSISTANT_TASK:>
Python Code:
class Person(object):
A class definition for a person. The following attributes are supported:
Attributes:
name: A string representing the person's name.
age: An integer representing the person's age.
mammal = True
def __init__(self, name, age):
Return a Person object with name and age set to the values supplied
self.name = name
self.age = age
def __str__(self):
return '{0} who is {1} years old.'.format(self.name, self.age)
person1 = Person('Alice', 25)
person2 = Person('Bob', 30)
print(person1, person2)
def next_fibonacci(status=[]):
if len(status) < 2:
status.append(1)
return 1
status.append(status[-2] + status[-1])
return status[-1]
print(next_fibonacci(), next_fibonacci(), next_fibonacci(), next_fibonacci(), next_fibonacci(), next_fibonacci())
class Person(object):
A class definition for a person. The following attributes are supported:
Attributes:
name: A string representing the person's name.
age: An integer representing the person's age.
friends = []
def __init__(self, name, age):
Return a Person object with name and age set to the values supplied
self.name = name
self.age = age
def __str__(self):
return '{0} who is {1} years old'.format(self.name, self.age)
person1 = Person('Alice', 25)
person2 = Person('Bob', 30)
person1.friends.append('Charlie')
person2.friends.append('Danielle')
print(person1.friends, person2.friends)
class Person(object):
A class definition for a person. The following attributes are supported:
Attributes:
name: A string representing the person's name.
age: An integer representing the person's age.
def __init__(self, name, age):
Return a Person object with name and age set to the values supplied
self.name = name
self.age = age
self.friends = []
def __str__(self):
return '{0} who is {1} years old'.format(self.name, self.age)
person1 = Person('Alice', 25)
person2 = Person('Bob', 30)
person1.friends.append('Charlie')
person2.friends.append('Danielle')
print(person1.friends, person2.friends)
print('This works:', person1.friends)
print('This does not work:', friends)
class Person(object):
A class definition for a person. The following attributes are supported:
Attributes:
name: A string representing the person's name.
age: An integer representing the person's age.
def __init__(self, name, age):
Return a Person object with name and age set to the values supplied
self.name = name
self.age = age
self.friends = []
def __str__(self):
Return a string representation of the object
return '{0} who is {1} years old'.format(self.name, self.age)
def add_friend(self, friend):
Add a friend
self.friends.append(friend)
person1 = Person('Alice', 25)
person2 = Person('Bob', 30)
person1.add_friend('Charlie')
person2.add_friend('Danielle')
print(person1.friends, person2.friends)
class Item(object):
def __init__(self, name, description, location):
self.name = name
self.description = description
self.location = location
def update_location(self, new_location):
pass
class Equipment(Item):
pass
class Consumable(Item):
def __init__(self, name, description, location, initial_quantity, current_quantity, storage_temp, flammability):
self.name = name
self.description = description
self.location = location
self.initial_quantity = initial_quantity
self.current_quantity = current_quantity
self.storage_temp = storage_temp
self.flammability = flammability
def update_quantity_remaining(self, amount):
pass
class Item(object):
def safely_stored(self):
raise NotImplementedError('override in subclass')
class Consumable(Item):
def safely_stored(self):
return True
a = Item()
a.safely_stored()
b = Consumable()
b.safely_stored()
from abc import ABCMeta, abstractmethod
class Item(metaclass=ABCMeta):
@abstractmethod
def safely_stored(self):
pass
class Consumable(Item):
def safely_stored(self):
return True
a = Item()
b = Consumable()
b.safely_stored()
class A(object):
def a(self):
print('a in class A')
class B(A):
def a(self):
A.a(self)
print('b in class B')
a = A()
a.a()
b = B()
b.a()
class A(object):
def a(self):
print('a in class A')
class B(A):
def a(self):
super().a()
print('b in class B')
a = A()
a.a()
b = B()
b.a()
class A(object):
def a(self):
print('A-a')
class A2(object):
def a(self):
print('A2-a')
class B(A, A2):
pass
a = A()
a.a()
a2 = A2()
a2.a()
b = B()
b.a()
class A(object):
def a(self):
print('A-a')
class A2(object):
def a(self):
print('A2-a')
class B(A):
pass
class C(B, A2):
pass
a = A()
a.a()
a2 = A2()
a2.a()
c = C()
c.a()
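# Added illustration (not part of the original notebook): the lookup order can
# be inspected directly; it is depth-first, left-to-right (C3 linearization).
print(C.__mro__)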
class Item(object):
def safely_stored(self):
raise NotImplementedError('override in subclass')
class Consumable(Item):
def safely_stored(self):
return True
a = Item()
b = Consumable()
print('a instance of Item:', isinstance(a, Item))
print('b instance of Consumable:', isinstance(b, Consumable))
print('b instance of Item:', isinstance(b, Item))
print('a instance of Consumable:', isinstance(a, Consumable))
class Ingredient(object):
The ingredient object that contains nutritional information
def __init__(self, name, carbs, protein, fat):
self.name = name
self.carbs = carbs
self.protein = protein
self.fat = fat
def get_nutrition(self):
Returns the nutritional information for the ingredient
return (self.carbs, self.protein, self.fat)
class Recipe(object):
The Recipe object containing the ingredients
def __init__(self, name, ingredients):
self.name = name
self.ingredients = ingredients
def get_nutrition(self):
Returns the nutritional information for the recipe
nutrition = [0, 0, 0]
for amount, ingredient in self.ingredients:
nutrition[0] += amount * ingredient.carbs
nutrition[1] += amount * ingredient.protein
nutrition[2] += amount * ingredient.fat
return nutrition
bread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)),
(30, Ingredient('Oil', 0, 0, 1)),
(36, Ingredient('Sugar', 1, 0, 0)),
(7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),
(560, Ingredient('Water', 0, 0, 0))])
print(bread.ingredients)
print(bread.get_nutrition())
import requests
r = requests.get('https://api.github.com/repos/streety/biof509/events')
print(r.status_code)
print(r.headers['content-type'])
print(r.text[:1000])
print(r.json()[0]['payload']['commits'][0]['message'])
type(r)
import pandas as pd
data = pd.DataFrame([[0,1,2,3], [4,5,6,7], [8,9,10,11]],
index=['a', 'b', 'c'],
columns=['col1', 'col2', 'col3', 'col4'])
data
print(data.shape)
print(data['col1'])
print(data.col1)
import matplotlib.pyplot as plt
%matplotlib inline
data.plot()
data.to_csv('Wk05-temp.csv')
data2 = pd.read_csv('Wk05-temp.csv', index_col=0)  # DataFrame.from_csv is deprecated
data2
class Ingredient(object):
The ingredient object that contains nutritional information
def __init__(self, name, carbs, protein, fat):
self.name = name
self.carbs = carbs
self.protein = protein
self.fat = fat
def __repr__(self):
return 'Ingredient({0}, {1}, {2}, {3})'.format(self.name, self.carbs, self.protein, self.fat)
def get_nutrition(self):
Returns the nutritional information for the ingredient
return (self.carbs, self.protein, self.fat)
class Recipe(object):
The Recipe object containing the ingredients
def __init__(self, name, ingredients):
self.name = name
self.ingredients = ingredients
def get_nutrition(self):
Returns the nutritional information for the recipe
nutrition = [0, 0, 0]
for amount, ingredient in self.ingredients:
nutrition[0] += amount * ingredient.carbs
nutrition[1] += amount * ingredient.protein
nutrition[2] += amount * ingredient.fat
return nutrition
bread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)),
(30, Ingredient('Oil', 0, 0, 1)),
(36, Ingredient('Sugar', 1, 0, 0)),
(7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),
(560, Ingredient('Water', 0, 0, 0))])
print(bread.ingredients)
print(bread.get_nutrition())
class Ingredient(object):
The ingredient object that contains nutritional information
def __init__(self, name, carbs, protein, fat):
self.name = name
self.carbs = carbs
self.protein = protein
self.fat = fat
def __repr__(self):
return 'Ingredient({0}, {1}, {2}, {3})'.format(self.name, self.carbs, self.protein, self.fat)
def get_nutrition(self):
Returns the nutritional information for the ingredient
return (self.carbs, self.protein, self.fat)
class Recipe(object):
The Recipe object containing the ingredients
def __init__(self, name, ingredients):
self.name = name
self.ingredients = ingredients
def get_nutrition(self):
Returns the nutritional information for the recipe
nutrition = [0, 0, 0]
for amount, ingredient in self.ingredients:
nutrition[0] += amount * ingredient.carbs
nutrition[1] += amount * ingredient.protein
nutrition[2] += amount * ingredient.fat
return nutrition
bread = Recipe('Bread', [(820, Ingredient('Flour', 0.77, 0.10, 0.01)),
(30, Ingredient('Oil', 0, 0, 1)),
(36, Ingredient('Sugar', 1, 0, 0)),
(7, Ingredient('Yeast', 0.3125, 0.5, 0.0625)),
(560, Ingredient('Water', 0, 0, 0))])
print(bread.ingredients)
print(bread.get_nutrition())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Week 11
Step3: Before we go on a note of caution is needed for class attributes. Do you remember the strange fibonacci sequence function from our first class?
Step6: The same issue can happen with classes, only this is a much more common source of bugs.
Step9: Both of our objects point to the same instance of the list type so adding a new friend to either object shows up in both.
Step10: Objects have their own namespace, although we have created variables called name, age, and friends they can only be accessed in the context of the object.
Step15: We are not limited to special methods when creating classes. Standard functions, or in this context methods, are an integral part of object oriented programming. Their definition is identical to special methods and functions outside of classes.
Step16: Private vs Public
Step17: Each of the classes we create inheriting from our general class can be thought of as having an 'is-a' relationship with the general class. For example, Equipment is a Item, Consumable is a Item.
Step18: A disadvantage with this approach is we only see the error message when we call the method. The error is in the way we implemented the class so it would be more intuitive to get an error earlier, when we first create the object.
Step19: Either of these approaches work well for adding new methods or completely changing the behaviour of a method. Often we only need to make a more subtle change. In this situation it can be useful to call a method from a parent class while only implementing our new functionality in the child class.
Step20: Using super() is usually the best approach, the reasons for this are covered in detail in this blog post
Step21: A simple rule-of-thumb is that search is depth first. The details are a little more complicated.
Step26: Duck typing
Step27: This has the basic functionality implemented but there are some improvements we can make.
Step28: The API documentation for requests
Step33: The API documentation for the DataFrame object.
Step38: Viewing the ingredients now looks much better. Let's now look at the get_nutrition method.
|
6,873
|
<ASSISTANT_TASK:>
Python Code:
from deepchem.molnet.load_function import hiv_datasets
from deepchem.models import GraphConvModel
from deepchem.data import NumpyDataset
from sklearn.metrics import average_precision_score
import numpy as np
tasks, all_datasets, transformers = hiv_datasets.load_hiv(featurizer="GraphConv")
train, valid, test = [NumpyDataset.from_DiskDataset(x) for x in all_datasets]
model = GraphConvModel(1, mode="classification")
model.fit(train)
y_true = np.squeeze(valid.y)
y_pred = model.predict(valid)[:,0,1]
print("Average Precision Score:%s" % average_precision_score(y_true, y_pred))
sorted_results = sorted(zip(y_pred, y_true), reverse=True)
hit_rate_100 = sum(x[1] for x in sorted_results[:100]) / 100
print("Hit Rate Top 100: %s" % hit_rate_100)
tasks, all_datasets, transformers = hiv_datasets.load_hiv(featurizer="GraphConv", split=None)
model = GraphConvModel(1, mode="classification", model_dir="/tmp/zinc/screen_model")
model.fit(all_datasets[0])
import os
work_units = os.listdir('/tmp/zinc/screen')
with open('/tmp/zinc/work_queue.sh', 'w') as fout:
fout.write("#!/bin/bash\n")
for work_unit in work_units:
full_path = os.path.join('/tmp/zinc/screen', work_unit)
fout.write("python inference.py %s\n" % full_path)
from rdkit import Chem
from rdkit.Chem.Draw import IPythonConsole
from IPython.display import SVG
from rdkit.Chem.Draw import rdMolDraw2D
best_mols = [Chem.MolFromSmiles(x.strip().split()[0]) for x in open('/tmp/zinc/screen/top_100k.smi').readlines()[:100]]
best_scores = [x.strip().split()[2] for x in open('/tmp/zinc/screen/top_100k.smi').readlines()[:100]]
print(best_scores[0])
best_mols[0]
print(best_scores[1])
best_mols[1]
print(best_scores[2])
best_mols[2]
print(best_scores[3])
best_mols[3]
#We use the code from https://github.com/PatWalters/rd_filters, detailed explanation is here: http://practicalcheminformatics.blogspot.com/2018/08/filtering-chemical-libraries.html
#We will run the PAINS filter on best_mols as suggested by Issue 1355 (https://github.com/deepchem/deepchem/issues/1355)
import os
import pandas as pd
from rdkit import Chem
from rdkit.Chem.Descriptors import MolWt, MolLogP, NumHDonors, NumHAcceptors, TPSA
from rdkit.Chem.rdMolDescriptors import CalcNumRotatableBonds
#First we get the rules from alert_collection.csv and then filter to get PAINS filter
rule_df = pd.read_csv(os.path.join(os.path.abspath(''), 'assets', 'alert_collection.csv'))
rule_df = rule_df[rule_df['rule_set_name']=='PAINS']
rule_list = []
for rule_id, smarts, max_val, desc in rule_df[["rule_id", "smarts", "max", "description"]].values.tolist():
smarts_mol = Chem.MolFromSmarts(smarts)
if smarts_mol:
rule_list.append((smarts_mol, max_val, desc))
def evaluate(smile):
mol = Chem.MolFromSmiles(smile)
if mol is None:
return [smile, "INVALID", -999, -999, -999, -999, -999, -999]
desc_list = [MolWt(mol), MolLogP(mol), NumHDonors(mol), NumHAcceptors(mol), TPSA(mol), CalcNumRotatableBonds(mol)]
for patt, max_val, desc in rule_list:
if len(mol.GetSubstructMatches(patt)) > max_val:
return [smile, desc + " > %d" % (max_val)] + desc_list
return [smile, "OK"] + desc_list
smiles = [x.strip().split()[0] for x in open('/tmp/zinc/screen/top_100k.smi').readlines()[:100]] # obtain the smiles
res = list(map(evaluate, smiles)) # here we apply the PAINS filter
df = pd.DataFrame(res, columns=["SMILES", "FILTER", "MW", "LogP", "HBD", "HBA", "TPSA", "Rot"])
df_ok = df[
(df.FILTER == "OK") &
df.MW.between(*[0, 500]) & # MW
df.LogP.between(*[-5, 5]) & #LogP
df.HBD.between(*[0, 5]) & #HBD
df.HBA.between(*[0, 10]) & #HBA
df.TPSA.between(*[0, 200]) & #TPSA
df.Rot.between(*[0, 10]) #Rot
]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Retrain Model Over Full Dataset For The Screen
Step2: 2. Create Work-Units
Step3: 5. Consume work units from "distribution mechanism"
Step4: The screen seems to favor molecules with one or multiple sulfur trioxides. The top scoring molecules also have low diversity. When creating a "buy list" we want to optimize for more things than just activity, for instance diversity and drug like MPO.
|
6,874
|
<ASSISTANT_TASK:>
Python Code:
#@title Install MuJoco (Edit to add your license key)
mjkey = """
MuJoCo License Here
""".strip()
mujoco_dir = "$HOME/.mujoco"
# Install OpenGL deps
!apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx libosmesa6 libglew2.0
# Fetch MuJoCo binaries from Roboti
!wget -q https://www.roboti.us/download/mujoco200_linux.zip -O mujoco.zip
!unzip -o -q mujoco.zip -d "$mujoco_dir"
# Copy over MuJoCo license
!echo "$mjkey" > "$mujoco_dir/mjkey.txt"
# Configure dm_control to use the OSMesa rendering backend
%env MUJOCO_GL=egl
#@title Imports
!pip install git+git://github.com/google-research/world_models.git
import tensorflow.compat.v1 as tf
from world_models.simulate import simulate
from world_models.agents import planet
from world_models.planners import planners
from world_models.objectives import objectives
from world_models.simulate import simulate
from world_models.utils import npz
from world_models.loops import train_eval
from world_models.tasks import tasks
tf.enable_eager_execution()
%load_ext tensorboard
task = tasks.DeepMindControl(domain_name='cheetah',
task_name='run',
action_repeat=4)
model = planet.RecurrentStateSpaceModel(task=task)
model_dir = '/tmp/experiment/model'
dist_strategy = tf.distribute.MirroredStrategy()
reset_fn = planet.create_planet_reset_fn(model=model)
observe_fn = planet.create_planet_observe_fn(model=model,
model_dir=model_dir,
strategy=dist_strategy)
predict_fn = planet.create_planet_predict_fn(model=model,
strategy=dist_strategy)
train_steps = 100 # How many training steps per episode
batch = 50
duration = 50 # How many timesteps in a single training sequence
learning_rate = 1e-3
train_fn = planet.create_planet_train_fn(model=model,
train_steps=train_steps,
batch=batch,
duration=duration,
learning_rate=learning_rate,
model_dir=model_dir,
strategy=dist_strategy)
objective_fn = objectives.DiscountedReward()
horizon = 12 # CEM planning horizon
iterations = 10 # CEM iterations
proposals = 1000 # Number of proposals to evaluate per iteration
top_fraction = 0.1 # Fraction of proposals with highest scores for fitting
# Base CEM planner to use for evaluation.
base_cem = planners.CEM(predict_fn=predict_fn,
observe_fn=observe_fn,
reset_fn=reset_fn,
task=task,
objective_fn=objective_fn,
horizon=horizon,
iterations=iterations,
proposals=proposals,
fraction=top_fraction)
# Training CEM planner with initial random cold start and random noise.
# Pure random actions for the first `n` episodes to bootstrap the world model.
random_cold_start_episodes = 5
train_cem = planners.RandomColdStart(task=task,
random_episodes=random_cold_start_episodes,
base_planner=base_cem)
# Add some Gaussian noise for active exploration.
noise_scale = 0.3
train_cem = planners.GaussianRandomNoise(task=task,
stdev=noise_scale,
base_planner=train_cem)
episode_num = 0
train_data_dir = '/tmp/experiment/data/train'
train_summary_dir = '/tmp/experiment/train'
episodes = list()
for i in range(random_cold_start_episodes):
episode, predictions, score = simulate.simulate(task=task,
planner=train_cem,
num_episodes=1)
scalar_summaries = {'score': score}
train_eval.visualize(summary_dir=train_summary_dir,
global_step=i,
episodes=episode,
predictions=predictions,
scalars=scalar_summaries)
episodes.extend(episode)
episode_num += 1
%tensorboard --logdir=/tmp/experiment/ --port=0
npz.save_dictionaries(episodes, train_data_dir)
train_fn(train_data_dir)
%tensorboard --logdir=/tmp/experiment/ --port=0
eval_summary_dir = '/tmp/experiment/eval'
episode, predictions, score = simulate.simulate(task=task,
planner=base_cem,
num_episodes=1)
scalar_summaries = {'score': score}
train_eval.visualize(summary_dir=eval_summary_dir,
global_step=i,
episodes=episode,
predictions=predictions,
scalars=scalar_summaries)
%tensorboard --logdir=/tmp/experiment/ --port=0
train_episodes_per_iter = 1 # How many training episodes to collect per train/eval iteration
eval_every_n_iters = 10 # A single eval episode every n iterations
num_iters = 100 # Total number of train/eval iterations
data_dir = '/tmp/experiment/loop/data/'
model_dir = '/tmp/experiment/loop/model'
train_eval.train_eval_loop(task=task,
train_planner=train_cem,
eval_planner=base_cem,
train_fn=train_fn,
num_train_episodes_per_iteration=train_episodes_per_iter,
eval_every_n_iterations=eval_every_n_iters,
num_iterations=num_iters,
episodes_dir=data_dir,
model_dir=model_dir
)
%tensorboard --logdir=/tmp/experiment/ --port=0
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: World Models Library
Step2: Task
Step3: World Model
Step4: In addition to reset_fn, observe_fn and predict_fn, we also need to define a train_fn as an extra hook to train the model on the latest collected episodes, with this signature
Step5: Planner
Step6: Simulation
Step7: We normally need to update our world model periodically on all the collected episodes so far, therefore we need to interleave simulation with model training. Since the size of collected episodes will grow over time, we should persist them to disk and use optimized/cacheable data iterators for training. Utility functions in utils/npz.py can be used here.
Step8: Now we can evaluate our agent by using the base_planner that is noise free.
Step9: Off the Shelf Train-Eval Loop
|
6,875
|
<ASSISTANT_TASK:>
Python Code:
import random as rd
cara = 0
vig = 0
for j in range(200):
for i in range(5):
rdnum = rd.random()
if (rdnum < 0.90):
cara += 1
if cara > 3:
#print('Con artist won')
vig = vig + 2
cara = 0
else:
#print('Street vendor won')
vig = vig - 2
cara = 0
print("The con artist's profit is: {}".format(vig))
vig = 0
seq = 0
v = 0
for j in range(200):
for i in range(5):
rdnum = rd.random()
if (rdnum < 0.90):
seq += 1
if seq == 3:
v = v+1
else:
seq = 0
vig = 4 * v
print("The con artist's profit is: {}".format(vig))
import matplotlib.pyplot as pp
%matplotlib inline
import random as rd
import math
x = 0
y = 1000
t = 0
pp.axis('equal')
while y > -0.0001:
pp.plot(x, y, 'o')
t = t + 1
x = 50*t
y = 1000 - 4.9*t**2
x = 0
y = 0
pp.axis('equal')
pp.plot(x, y, 'o')
while (x >= -80 and x <= 40):
passo = rd.random()
if (passo < 0.5):
x += 1
else:
x -= 1
y += 1
pp.plot(x, y, 'o')
if x > 40:
print('O bebado saiu pela extremidade a leste')
else:
print('O bebado saiu pela extremidade a oeste')
x = 0
y = 0
leste = 0
oeste = 0
for i in range(100):
while (x >= -80 and x <= 40):
passo = rd.random()
if (passo < 0.5):
x += 1
else:
x -= 1
if x > 40:
leste += 1
x = 0
else:
oeste += 1
x = 0
print('The probability of the drunkard leaving through the east end is {:.2f}% and {:.2f}% through the west end.'.format(leste, oeste))
a = 0
b = 0
door = rd.randint(1,3)
usr = int(input('Choose one of the numbers\n[1,2,3]\nYour choice: '))
if door == usr:
if usr == 1:
a = rd.randint(2,3)
elif usr == 2:
a = rd.randrange(1,4,2)
elif usr == 3:
a = rd.randint(1,2)
elif (usr + door) == 3:
a = 3
elif (usr + door) == 4:
a = 2
elif (usr + door) == 5:
a = 1
print('Door {} was opened'.format(a))
usr = int(input('Choose a new door or keep the same one\nYour choice: '))
if usr == door:
print('You won!!!')
else:
print('Unfortunately the door was empty...')
countdif = 0
counteq = 0
for i in range(1,1001):
a = 0
b = 0
door = rd.randint(1,3)
usr = rd.randint(1,3)
if door == usr:
if usr == 1:
a = rd.randint(2,3)
elif usr == 2:
a = rd.randrange(1,4,2)
elif usr == 3:
a = rd.randint(1,2)
elif (usr + door) == 3:
a = 3
elif (usr + door) == 4:
a = 2
elif (usr + door) == 5:
a = 1
if a == 1:
usr1 = rd.randint(2,3)
elif a == 2:
usr1 = rd.randrange(1,4,2)
elif a == 3:
usr1 = rd.randint(1,2)
if usr1 == door and usr1 != usr:
countdif += 1
elif usr1 == door and usr1 == usr:
counteq += 1
print('Wins without switching doors: {}\nWins after switching doors: {}'.format(counteq, countdif))
alpha = float(input('Enter an angle between 30º and 90º: '))
fps = 5
v_0 = 50
v_x = v_0 * math.cos(math.radians(alpha))  # the input angle is in degrees
v_y = v_0 * math.sin(math.radians(alpha))
x = 0.0
y = 0.0
t = 0
pp.axis('equal')
while y > -0.0001:
pp.plot(x, y, 'or')
t = t + 1 / fps
x = v_x * t
y = v_y * t - 4.9 * t**2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 1
Step2: Question 2
Step3: The drunkard
Step4: Question 4
Step5: The door of hope (Monty Hall)
Step6: Question 6
Step7: Ballistics
|
6,876
|
<ASSISTANT_TASK:>
Python Code:
#Import matplotlib
%matplotlib inline
#Import pandas, seaborn, and ipython display
import pandas as pd
import seaborn as sns
from IPython.display import display, HTML
#Read in taxonmic classification results from sourmash with pandas
#Dataframe name, read in csv file
mg_1_table = pd.read_csv("../data/mg_1")
mg_2_table = pd.read_csv("../data/mg_2")
mg_3_table = pd.read_csv("../data/mg_3")
mg_4_table = pd.read_csv("../data/mg_4")
mg_5_table = pd.read_csv("../data/mg_5")
mg_6_table = pd.read_csv("../data/mg_6")
mg_7_table = pd.read_csv("../data/mg_7")
mg_8_table = pd.read_csv("../data/mg_8")
#Display taxonomic classification results for 8 metagenomes
#Display data frames as tabels with display()
#Remove dataframe by commenting out using the "#" symbol
#Display all dataframes
display(mg_1_table)
display(mg_2_table)
display(mg_3_table)
display(mg_4_table)
display(mg_5_table)
display(mg_6_table)
display(mg_7_table)
display(mg_8_table)
#Combined output into a single file named all_gather_results.csv
!head -1 ../data/mg_1 \
> all_gather_results.csv; tail -n +2 -q ../data/mg_1 ../data/mg_2 ../data/mg_3 ../data/mg_4 ../data/mg_5 ../data/mg_6 ../data/mg_7 ../data/mg_8 >> all_gather_results.csv
sns.set(style="darkgrid")
#Ploting the frequency of detection of each match across the 8 metagenomes
dx = pd.read_csv('all_gather_results.csv', header = 0)
dx['name'].value_counts().plot(kind="barh", fontsize=16, figsize=(12,12))
#plt.savefig('<file name>.pdf', bbox_inches='tight')
#Ploting average of the fraction of match detected across all metagenomes
newdx = dx[['f_match', 'name']].copy()
newdx
newdx_byname = newdx.set_index('name')
newdx_byname.groupby(level=0).mean().plot(kind="barh", fontsize=16, figsize=(12,12))
#plt.savefig('<insert name>.pdf', bbox_inches='tight')
#Calculate jaccard distance using sourmash compare and generate results in a csv named mg_compare
#Path to sourmash install, "compare", path to signatures, output format, output filename
!~/py3/bin/sourmash compare ../data/mg_*sig --csv mg_compare
#Generate similarity matrix with hierchical clustering
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(context="paper", font="monospace")
sns.set(font_scale=1.4)
#Define clustermap color scheme
cmap = sns.cubehelix_palette(8, start=2, rot=0, dark=0, light=.95, as_cmap=True)
# Load the datset
df = pd.read_csv("mg_compare", header=0)
# Draw the clustermap using seaborn
o = sns.clustermap(df, vmax=1, vmin=0, square=True, linewidths=.005, cmap=cmap)
#Bold labels and rotate
plt.setp(o.ax_heatmap.get_yticklabels(), rotation=0, fontweight="bold")
plt.setp(o.ax_heatmap.get_xticklabels(), rotation=90, fontweight="bold")
#Set context with seaborn
sns.set(context="paper",font="monospace")
#Save figure
#plt.savefig(<filename>.pdf)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2) Convert sourmash output (i.e. csv) to dataframe from visualization
Step2: Terms
Step3: 3) Compare metagenomes with sourmash compare
Step4: 4) Visualize metagenome comparisons
|
6,877
|
<ASSISTANT_TASK:>
Python Code:
a = list(range(10))
print(a)
b = list(range(100))
print(b)
a.append("ich bin keine Zahl")
b.append("ich bin keine Zahl")
print(a,b)
a.pop()
b.pop()
print(a,b)
a = list(map(str, a))
b = list(map(str, b))
print(a,b)
for element_in_der_liste in a:
print(str(element_in_der_liste))
leere_list = []
for element_in_der_list in a:
neues_element = str(element_in_der_list)
leere_list.append(neues_element)
leere_list
lange_leere_list = []
for x in b:
neues_element = str(x)
lange_leere_list.append(neues_element)
lange_leere_list
b = list(map(int,b))
b[-10:]
b[89:]
b[89:95]
#welche "folgende Liste"?
#Ich erfinde einfach mal eine Liste, die in etwa zur Aufgabe 8. zu passen scheint.
lst = [101,1,15,150,1500,20,500,12000,2]
lst.sort()
lst[-2]
print(lst)
b = [23, 2345, 3, 234527, 3452345, 345, 3421, 12]
b
b_neue = []
for elem in b:
if elem > 1000:
pass
elif elem > 100 and elem < 1000:
b_neue.append(str(elem))
else:
elem = elem * 100
b_neue.append(elem)
b_neue
City = [{"Stadt": "Zürich", "Bevölkerung": 400000, "Kanton": "ZH"}, {"Stadt": "Bern", "Bevölkerung": 350000, "Kanton": "BE"}, {"Stadt": "Genf", "Bevölkerung": 310000, "Kanton": "GE"}, {"Stadt": "Basel", "Bevölkerung": 300000, "Kanton": "BS"}, {"Stadt": "Luzern", "Bevölkerung": 200000, "Kanton": "LU"}]
City[3]["Bevölkerung"]
for x in City:
print(x)
x = {"Stadt": "Zürich", "Bevölkerung": 400000, "Kanton": "ZH"}
print(x["Bevölkerung"])
#"Human Style" megamüehsam
City[0]["Bevölkerung"] + City[1]["Bevölkerung"] + City[2]["Bevölkerung"] + City[3]["Bevölkerung"] + City[4]["Bevölkerung"]
result = 0
for x in City:
result += x["Bevölkerung"]
# += is a shortcut that basically means: add this to the existing value.
print(result)
Gesamtbevölkerung = 8000000
for x in City:
# Here x stands for the city - more precisely, the dictionary holding all the data for one city (population, canton and city name)
prozent = round((x["Bevölkerung"] /Gesamtbevölkerung) *100,1)
print(x["Stadt"] + ": " + str(prozent))
x
Lausanne = {"Bevölkerung": 90000, "Stadt": "Lausanne", "Kanton": "VD"}
Hombi = {"Bevölkerung": 8000, "Stadt": "Hombi", "Kanton": "ZH"}
City.append(Lausanne)
City.append(Hombi)
City
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Do the same with a list of 100 elements and assign it to the variable b.
Step2: 3. Append the following string to both lists
Step3: 4. Delete this last entry from the list again
Step4: 5. Convert every number in lists a and b from int to str
Step5: 6. From the list b, display only the last ten numbers
Step6: 7. From the following list, display the second-largest value
Step7: 8. Multiply every number in this list that is smaller than 100 by 100; if a number is between 100 and 1000, convert it into a string; and if it is greater than or equal to 1000, delete it.
Step8: 9. Write a list of dictionaries for the five largest Swiss cities, their population and the respective canton.
Step9: 10. Display only the population of the city of Geneva
Step10: 11. Print the total population of all the cities
Step11: 12. Calculate each city's share of the total Swiss population and print the results next to the city name
Step12: 13. Also add the cities of Winterthur and Lucerne
|
6,878
|
<ASSISTANT_TASK:>
Python Code:
from cntk import load_model
import findspark
findspark.init('/root/spark-2.1.0-bin-hadoop2.6')
import os
import numpy as np
import pandas as pd
import pickle
import sys
from pyspark import SparkFiles
from pyspark import SparkContext
from pyspark.sql.session import SparkSession
sc =SparkContext()
spark = SparkSession(sc)
import tarfile
from urllib.request import urlretrieve
import xml.etree.ElementTree
cifar_uri = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' # Location of test image dataset
mean_image_uri = 'https://raw.githubusercontent.com/Azure-Samples/hdinsight-pyspark-cntk-integration/master/CIFAR-10_mean.xml' # Mean image for subtraction
model_uri = 'https://github.com/Azure-Samples/hdinsight-pyspark-cntk-integration/raw/master/resnet20_meanimage_159.dnn' # Location of trained model
local_tmp_dir = '/tmp/cifar'
local_cifar_path = os.path.join(local_tmp_dir, os.path.basename(cifar_uri))
local_model_path = os.path.join(local_tmp_dir, 'model.dnn')
local_mean_image_path = os.path.join(local_tmp_dir, 'mean_image.xml')
os.makedirs(local_tmp_dir, exist_ok=True)
if not os.path.exists(local_cifar_path):
urlretrieve(cifar_uri, filename=local_cifar_path)
with tarfile.open(local_cifar_path, 'r:gz') as f:
test_dict = pickle.load(f.extractfile('cifar-10-batches-py/test_batch'), encoding='latin1')
def reshape_image(record):
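# Each CIFAR record stores the image as a flat 3072-value vector in (channel, row, col) order; reshape and transpose it to 32x32x3 for plotting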
image, label, filename = record
return image.reshape(3,32,32).transpose(1,2,0), label, filename
image_rdd = sc.parallelize(zip(test_dict['data'], test_dict['labels'], test_dict['filenames']))
image_rdd = image_rdd.map(reshape_image)
sample_images = image_rdd.take(5)
image_data = np.array([i[0].reshape((32*32*3)) for i in sample_images]).T
image_labels = [i[2] for i in sample_images]
image_df = pd.DataFrame(image_data, columns=image_labels)
spark.createDataFrame(image_df).coalesce(1).write.mode("overwrite").csv("/tmp/cifar_image", header=True)
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from glob import glob
image_df = pd.read_csv(glob('/tmp/cifar_image/*.csv')[0])
plt.figure(figsize=(15,1))
for i, col in enumerate(image_df.columns):
plt.subplot(1, 5, i+1)
image = image_df[col].values.reshape((32, 32, 3))
plt.imshow(image)
plt.title(col)
cur_axes = plt.gca()
cur_axes.axes.get_xaxis().set_visible(False)
cur_axes.axes.get_yaxis().set_visible(False)
urlretrieve(model_uri, local_model_path)
sc.addFile(local_model_path)
urlretrieve(mean_image_uri, local_mean_image_path)
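# Parse the mean-image XML into a 3x32x32 array and broadcast it to the Spark workers for preprocessing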
mean_image = xml.etree.ElementTree.parse(local_mean_image_path).getroot()
mean_image = [float(i) for i in mean_image.find('MeanImg').find('data').text.strip().split(' ')]
mean_image = np.array(mean_image).reshape((32, 32, 3)).transpose((2, 0, 1))
mean_image_bc = sc.broadcast(mean_image)
def get_preprocessed_image(my_image, mean_image):
''' Reshape and flip RGB order '''
my_image = my_image.astype(np.float32)
bgr_image = my_image[:, :, ::-1] # RGB -> BGR
image_data = np.ascontiguousarray(np.transpose(bgr_image, (2, 0, 1)))
image_data -= mean_image
return(image_data)
def run_worker(records):
''' Scoring script run by each worker '''
loaded_model = load_model(SparkFiles.get('./model.dnn'))
mean_image = mean_image_bc.value
# Iterate through the records in the RDD.
# record[0] is the image data
# record[1] is the true label
# record[2] is the file name
for record in records:
preprocessed_image = get_preprocessed_image(record[0], mean_image)
dnn_output = loaded_model.eval({loaded_model.arguments[0]: [preprocessed_image]})
yield record[1], np.argmax(np.squeeze(dnn_output))
labelled_images = image_rdd.mapPartitions(run_worker)
# Time how long it takes to score 10k test images
start = pd.datetime.now()
results = labelled_images.collect()
print('Scored {} images'.format(len(results)))
stop = pd.datetime.now()
print(stop - start)
df = pd.DataFrame(results, columns=['true_label', 'predicted_label'])
num_correct = sum(df['true_label'] == df['predicted_label'])
num_total = len(results)
print('Correctly predicted {} of {} images ({:0.2f}%)'.format(num_correct, num_total, 100 * num_correct / num_total))
spark.createDataFrame(df).coalesce(1).write.mode("overwrite").csv("/tmp/cifar_scores", header=True)
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import os
from glob import glob
df = pd.read_csv(glob('/tmp/cifar_scores/*.csv')[0])
print('Constructing a confusion matrix with the first {} samples'.format(len(df.index)))
label_to_name_dict = {0: 'airplane',
1: 'automobile',
2: 'bird',
3: 'cat',
4: 'deer',
5: 'dog',
6: 'frog',
7: 'horse',
8: 'ship',
9: 'truck'}
labels = np.sort(df['true_label'].unique())
named_labels = [label_to_name_dict[i] for i in labels]
cm = confusion_matrix(df['true_label'], df['predicted_label'], labels=labels)
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, named_labels, rotation=90)
plt.yticks(tick_marks, named_labels)
plt.xlabel('Predicted label')
plt.ylabel('True Label')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a name="tarball"></a>
Step2: <a name="rdd"></a>
Step3: To convince ourselves that the data has been properly loaded, let's visualize a few of these images. For plotting, we will need to transfer them to the local context by way of a Spark dataframe
Step4: <a name="score"></a>
Step5: <a name="functions"></a>
Step6: <a name="map"></a>
Step7: <a name="evaluate"></a>
Step8: We can construct a confusion matrix to visualize which classification errors are most common
|
6,879
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import print_function
import tellurium as te
# to get the tellurium version use
print('te.__version__')
print(te.__version__)
# or
print('te.getTelluriumVersion()')
print(te.getTelluriumVersion())
# to print the full version info use
print('-' * 80)
te.printVersionInfo()
print('-' * 80)
from builtins import range
# Load SBML file
r = te.loada('''
model test
J0: X0 -> X1; k1*X0;
X0 = 10; X1=0;
k1 = 0.2
end
''')
import matplotlib.pyplot as plt
# Turn of notices so they don't clutter the output
te.noticesOff()
for i in range(0, 20):
result = r.simulate (0, 10)
r.reset()
r.plot(result, loc=None, show=False,
linewidth=2.0, linestyle='-', color='black', alpha=0.8)
r.k1 = r.k1 + 0.2
# Turn the notices back on
te.noticesOn()
# create tmp file
import tempfile
ftmp = tempfile.NamedTemporaryFile(suffix=".xml")
# load model
r = te.loada('S1 -> S2; k1*S1; k1 = 0.1; S1 = 10')
# save to file
te.saveToFile(ftmp.name, r.getMatlab())
# or easier via
r.exportToMatlab(ftmp.name)
# load file
sbmlstr = te.readFromFile(ftmp.name)
print('%' + '*'*80)
print('Converted MATLAB code')
print('%' + '*'*80)
print(sbmlstr)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Repeat simulation without notification
Step3: File helpers for reading and writing
|
6,880
|
<ASSISTANT_TASK:>
Python Code:
import os
from google.cloud import bigquery
import pandas as pd
%load_ext google.cloud.bigquery
PROJECT = "cloud-training-demos" # Replace with your PROJECT
BUCKET = PROJECT
REGION = "us-east1"
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
%%bigquery --project $PROJECT
SELECT
url, title, score
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
LENGTH(title) > 10
AND score > 10
AND LENGTH(url) > 0
LIMIT 10
%%bigquery --project $PROJECT
SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
COUNT(title) AS num_articles
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
GROUP BY
source
ORDER BY num_articles DESC
LIMIT 100
regex = '.*://(.[^/]+)/'
sub_query = """
SELECT
title,
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '{0}'), '.'))[OFFSET(1)] AS source
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '{0}'), '.com$')
AND LENGTH(title) > 10
""".format(regex)
query = """
SELECT
LOWER(REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ')) AS title,
source
FROM
({sub_query})
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
""".format(sub_query=sub_query)
print(query)
bq = bigquery.Client(project=PROJECT)
title_dataset = bq.query(query).to_dataframe()
title_dataset.head()
print("The full dataset contains {n} titles".format(n=len(title_dataset)))
title_dataset.source.value_counts()
DATADIR = './data/'
if not os.path.exists(DATADIR):
os.makedirs(DATADIR)
FULL_DATASET_NAME = 'titles_full.csv'
FULL_DATASET_PATH = os.path.join(DATADIR, FULL_DATASET_NAME)
# Let's shuffle the data before writing it to disk.
title_dataset = title_dataset.sample(n=len(title_dataset))
title_dataset.to_csv(
FULL_DATASET_PATH, header=False, index=False, encoding='utf-8')
sample_title_dataset = title_dataset.sample(n=1000)
sample_title_dataset.source.value_counts()
SAMPLE_DATASET_NAME = 'titles_sample.csv'
SAMPLE_DATASET_PATH = os.path.join(DATADIR, SAMPLE_DATASET_NAME)
sample_title_dataset.to_csv(
SAMPLE_DATASET_PATH, header=False, index=False, encoding='utf-8')
sample_title_dataset.head()
import datetime
import os
import shutil
import pandas as pd
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping
from tensorflow_hub import KerasLayer
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
print(tf.__version__)
%matplotlib inline
MODEL_DIR = "./text_models"
DATA_DIR = "./data"
ls ./data/
DATASET_NAME = "titles_full.csv"
TITLE_SAMPLE_PATH = os.path.join(DATA_DIR, DATASET_NAME)
COLUMNS = ['title', 'source']
titles_df = pd.read_csv(TITLE_SAMPLE_PATH, header=None, names=COLUMNS)
titles_df.head()
titles_df.source.value_counts()
CLASSES = {
'github': 0,
'nytimes': 1,
'techcrunch': 2
}
N_CLASSES = len(CLASSES)
def encode_labels(sources):
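# Map each source string to its integer class id, then one-hot encode it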
classes = [CLASSES[source] for source in sources]
one_hots = to_categorical(classes, num_classes=N_CLASSES)
return one_hots
encode_labels(titles_df.source[:4])
N_TRAIN = int(len(titles_df) * 0.95)
titles_train, sources_train = (
titles_df.title[:N_TRAIN], titles_df.source[:N_TRAIN])
titles_valid, sources_valid = (
titles_df.title[N_TRAIN:], titles_df.source[N_TRAIN:])
sources_train.value_counts()
sources_valid.value_counts()
X_train, Y_train = titles_train.values, encode_labels(sources_train)
X_valid, Y_valid = titles_valid.values, encode_labels(sources_valid)
X_train[:3]
Y_train[:3]
# TODO 1
NNLM = "https://tfhub.dev/google/nnlm-en-dim50/2"
nnlm_module = KerasLayer(
NNLM, output_shape=[50], input_shape=[], dtype=tf.string, trainable=True)
# TODO 1
nnlm_module(tf.constant(["The dog is happy to see people in the street."]))
# TODO 1
SWIVEL = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1"
swivel_module = KerasLayer(
SWIVEL, output_shape=[20], input_shape=[], dtype=tf.string, trainable=True)
# TODO 1
swivel_module(tf.constant(["The dog is happy to see people in the street."]))
def build_model(hub_module, name):
model = Sequential([
hub_module, # TODO 2
Dense(16, activation='relu'),
Dense(N_CLASSES, activation='softmax')
], name=name)
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy']
)
return model
def train_and_evaluate(train_data, val_data, model, batch_size=5000):
X_train, Y_train = train_data
tf.random.set_seed(33)
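# Clear any model directory left over from a previous run so checkpoints and TensorBoard logs start fresh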
model_dir = os.path.join(MODEL_DIR, model.name)
if tf.io.gfile.exists(model_dir):
tf.io.gfile.rmtree(model_dir)
history = model.fit(
X_train, Y_train,
epochs=100,
batch_size=batch_size,
validation_data=val_data,
callbacks=[EarlyStopping(), TensorBoard(model_dir)],
)
return history
data = (X_train, Y_train)
val_data = (X_valid, Y_valid)
nnlm_model = build_model(nnlm_module, 'nnlm')
nnlm_history = train_and_evaluate(data, val_data, nnlm_model)
history = nnlm_history
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['accuracy', 'val_accuracy']].plot()
swivel_model = build_model(swivel_module, name='swivel')
swivel_history = train_and_evaluate(data, val_data, swivel_model)
history = swivel_history
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['accuracy', 'val_accuracy']].plot()
OUTPUT_DIR = "./savedmodels"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(OUTPUT_DIR, 'swivel')
os.environ['EXPORT_PATH'] = EXPORT_PATH
shutil.rmtree(EXPORT_PATH, ignore_errors=True)
tf.saved_model.save(swivel_model, EXPORT_PATH)
%%bash
# TODO 5
MODEL_NAME=title_model
VERSION_NAME=swivel
if [[ $(gcloud ai-platform models list --format='value(name)' | grep ^$MODEL_NAME$) ]]; then
echo "$MODEL_NAME already exists"
else
echo "Creating $MODEL_NAME"
gcloud ai-platform models create --region=$REGION $MODEL_NAME
fi
if [[ $(gcloud ai-platform versions list --model $MODEL_NAME --format='value(name)' | grep ^$VERSION_NAME$) ]]; then
echo "Deleting already existing $MODEL_NAME:$VERSION_NAME ... "
echo yes | gcloud ai-platform versions delete --model=$MODEL_NAME $VERSION_NAME
echo "Please run this cell again if you don't see a Creating message ... "
sleep 2
fi
echo "Creating $MODEL_NAME:$VERSION_NAME"
gcloud ai-platform versions create \
--model=$MODEL_NAME $VERSION_NAME \
--framework=tensorflow \
--python-version=3.7 \
--runtime-version=2.1 \
--origin=$EXPORT_PATH \
--staging-bucket=gs://$BUCKET \
--machine-type n1-standard-4 \
--region=$REGION
!saved_model_cli show \
--tag_set serve \
--signature_def serving_default \
--dir {EXPORT_PATH}
!find {EXPORT_PATH}
%%writefile input.json
{"keras_layer_1_input": "hello"}
!gcloud ai-platform predict \
--model title_model \
--json-instances input.json \
--version swivel \
--region=$REGION
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Replace the variable values in the cell below
Step2: Create a Dataset from BigQuery
Step3: Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http
Step6: Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning.
Step7: For ML training, we usually need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset). AutoML however figures out on its own how to create these splits, so we won't need to do that here.
Step8: AutoML for text classification requires that
Step9: Let's make sure we have roughly the same number of labels for each of our three labels
Step10: Finally we will save our data, which is currently in-memory, to disk.
Step11: Now let's sample 1000 articles from the full dataset and make sure we have enough examples for each label in our sample dataset (see here for further details on how to prepare data for AutoML).
Step12: Let's write the sample dataset to disk.
Step13: Let's start by specifying where the information about the trained models will be saved as well as where our dataset is located
Step14: Loading the dataset
Step15: Let's look again at the number of examples per label to make sure we have a well-balanced dataset
Step16: Preparing the labels
Step17: Preparing the train/test splits
Step18: To be on the safe side, we verify that the train and test splits
Step19: Now let's create the features and labels we will feed our models with
Step20: NNLM Model
Step21: Note that this TF-Hub embedding produces a single 50-dimensional vector when passed a sentence
Step22: Swivel Model
Step23: Similarly as the previous pre-trained embedding, it outputs a single vector when passed a sentence
Step24: Building the models
Step25: Let's also wrap the training code into a train_and_evaluate function that
Step26: Training NNLM
Step27: Training Swivel
Step28: Deploying the model
Step29: Then we can deploy the model using the gcloud CLI as before
Step30: Before we try our deployed model, let's inspect its signature to know what to send to the deployed API
Step31: Let's go ahead and hit our model
|
6,881
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib notebook
# execute dummy code here
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
iris = datasets.load_iris()
RFclf = RandomForestClassifier().fit(iris.data, iris.target)
print(np.shape( # complete
print( # complete
print(np.shape( # complete
print( # complete
plt.scatter( # complete
plt.xlabel('sepal length')
plt.ylabel('sepal width')
from sklearn.neighbors import KNeighborsClassifier
KNNclf = KNeighborsClassifier( # complete
preds = # complete
plt.figure()
plt.scatter( # complete
KNNclf = KNeighborsClassifier(# complete
preds = # complete
plt.figure()
plt.scatter( # complete
from sklearn.cross_validation import cross_val_predict
CVpreds = cross_val_predict( # complete
plt.figure()
plt.scatter( # complete
print("The accuracy of the kNN = 5 model is ~{:.4}".format( # complete
CVpreds50 = cross_val_predict( # complete
print("The accuracy of the kNN = 50 model is ~{:.4}".format( # complete
# complete
from sklearn.metrics import confusion_matrix
cm = confusion_matrix( # complete
print(cm)
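# Normalize each row of the confusion matrix so that every true class sums to 1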
normalized_cm = cm.astype('float')/cm.sum(axis = 1)[:,np.newaxis]
normalized_cm
plt.imshow( # complete
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1) Introduction to scikit-learn
Step2: Generally speaking, the procedure for scikit-learn is uniform across all machine-learning algorithms. Models are accessed via the various modules (ensemble, SVM, neighbors, etc), with user-defined tuning parameters. The features (or data) for the models are stored in a 2D array, X, with rows representing individual sources and columns representing the corresponding feature values. [In a minority of cases, X, represents a similarity or distance matrix where each entry represents the distance to every other source in the data set.] In cases where there is a known classification or scalar value (typically supervised methods), this information is stored in a 1D array y.
Step3: Problem 1d What is the shape and content of the iris target?
Step4: Finally, as a baseline for the exercises that follow, we will now make a simple 2D plot showing the separation of the 3 classes in the iris dataset. This plot will serve as the reference for examining the quality of the clustering algorithms.
Step5: Problem 2) Supervised Machine Learning
Step6: These results are almost identical to the training classifications. However, we have cheated! In this case we are evaluating the accuracy of the model (98% in this case) using the same data that defines the model. Thus, what we have really evaluated here is the training error. The relevant parameter, however, is the generalization error
Step7: While it is useful to understand the overall accuracy of the model, it is even more useful to understand the nature of the misclassifications that occur.
Step8: We just found that the classifier does a much better job classifying setosa and versicolor than it does for virginica. The main reason for this is some virginica flowers lie far outside the main virginica locus, and within predominantly versicolor "neighborhoods". In addition to knowing the accuracy for the individual classes, it is also useful to know class predictions for the misclassified sources, or in other words where there is "confusion" for the classifier. The best way to summarize this information is with a confusion matrix. In a confusion matrix, one axis shows the true class and the other shows the predicted class. For a perfect classifier all of the power will be along the diagonal, while confusion is represented by off-diagonal signal.
Step9: From this representation, we see right away that most of the virginica that are being misclassifed are being scattered into the versicolor class. However, this representation could still be improved
Step10: The normalization makes it easier to compare the classes, since each class has a different number of sources. Now we can proceed with a visual representation of the confusion matrix. This is best done using imshow() within pyplot. You will also need to plot a colorbar, and labeling the axes will also be helpful.
|
6,882
|
<ASSISTANT_TASK:>
Python Code:
# Getting Basic Data
import goldsberry
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Rectangle, Arc
from numpy import pi
import seaborn as sns
pd.set_option("display.max_columns", 50)
pd.options.mode.chained_assignment = None
print goldsberry.__version__
print pd.__version__
# Getting Players List
players_2015 = goldsberry.PlayerList(Season='2015-16')
players_2015 = pd.DataFrame(players_2015.players())
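# Look up James Harden's player id from the 2015-16 player list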
harden_id = players_2015.loc[players_2015['DISPLAY_LAST_COMMA_FIRST'].str.contains("Harden"), 'PERSON_ID']
#XY Shot Charts
harden_shots = goldsberry.player.shot_chart(harden_id.values.tolist()[0], Season='2015-16')
harden_shots = pd.DataFrame(harden_shots.chart())
harden_shots.shape
harden_shots.head()
dashboard = goldsberry.player.shot_dashboard(harden_id)
pd.DataFrame(dashboard.dribble())
#Sort XY Shots and Assign a Shot Number
#ShotNumber will be used to merge the two datasets.
harden_shots.sort(['GAME_ID', 'GAME_EVENT_ID'], inplace=True)
harden_shots['SHOT_NUMBER'] = harden_shots.groupby(['GAME_ID', 'PLAYER_ID'])['GAME_EVENT_ID'].cumcount()+1
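# NOTE: harden_shots_advanced came from the defender-distance endpoint that the NBA has since blocked (see the notes below), so this merge no longer runs as-is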
#Merge data into a single dataframe
harden_shots_full = pd.merge(harden_shots, harden_shots_advanced, on=['GAME_ID', 'SHOT_NUMBER'], how='left')
harden_shots_full.head()
sns.set_style("white")
sns.set_color_codes()
plt.figure(figsize=(12,11))
plt.scatter(harden_shots.LOC_X, harden_shots.LOC_Y)
plt.show()
def draw_court(ax=None, color='black', lw=2, outer_lines=False):
# If an axes object isn't provided to plot onto, just get current one
if ax is None:
ax = plt.gca()
# Create the various parts of an NBA basketball court
# Create the basketball hoop
# Diameter of a hoop is 18" so it has a radius of 9", which is a value
# 7.5 in our coordinate system
hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False)
# Create backboard
backboard = Rectangle((-30, -7.5), 60, -1, linewidth=lw, color=color)
# The paint
# Create the outer box of the paint, width=16ft, height=19ft
outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color,
fill=False)
# Create the inner box of the paint, width=12ft, height=19ft
inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color,
fill=False)
# Create free throw top arc
top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180,
linewidth=lw, color=color, fill=False)
# Create free throw bottom arc
bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0,
linewidth=lw, color=color, linestyle='dashed')
# Restricted Zone, it is an arc with 4ft radius from center of the hoop
restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw,
color=color)
# Three point line
# Create the side 3pt lines, they are 14ft long before they begin to arc
corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw,
color=color)
corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color)
# 3pt arc - center of arc will be the hoop, arc is 23'9" away from hoop
# I just played around with the theta values until they lined up with the
# threes
three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw,
color=color)
# Center Court
center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0,
linewidth=lw, color=color)
center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0,
linewidth=lw, color=color)
# List of the court elements to be plotted onto the axes
court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw,
bottom_free_throw, restricted, corner_three_a,
corner_three_b, three_arc, center_outer_arc,
center_inner_arc]
if outer_lines:
# Draw the half court line, baseline and side out bound lines
outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw,
color=color, fill=False)
court_elements.append(outer_lines)
# Add the court elements onto the axes
for element in court_elements:
ax.add_patch(element)
return ax
plt.figure(figsize=(12,11))
plt.scatter(harden_shots_full.LOC_X[0], harden_shots_full.LOC_Y[0])
draw_court()
# xy and def_dist were not defined in the original cell; assume the first shot's
# location and its defender distance (scaled by 10, as in draw_defender_radius below)
xy = (harden_shots_full.LOC_X[0], harden_shots_full.LOC_Y[0])
def_dist = harden_shots_full.CLOSE_DEF_DIST[0] * 10
defender = Circle(xy, def_dist, alpha=.5)
fig = plt.gcf()
fig.gca().add_artist(defender)
# Descending values along the axis from left to right
plt.xlim(-300,300)
plt.ylim(422.5, -47.5)
len(harden_shots_full)
def draw_defender_radius(df, ax=None, alpha = .25):
# If an axes object isn't provided to plot onto, just get current one
if ax is None:
ax = plt.gca()
for i in range(len(df)):
defender = Circle((df.LOC_X[i],df.LOC_Y[i]),
radius = df.CLOSE_DEF_DIST[i]*10,
alpha = alpha)
ax.add_patch(defender)
return ax
def fancy_shotchart(df):
plt.figure(figsize=(12,11))
plt.scatter(df.LOC_X, df.LOC_Y)
draw_court()
draw_defender_radius(df)
# Descending values along the axis from left to right
plt.xlim(-300,300)
plt.ylim(422.5, -47.5)
harden_game = harden_shots_full.ix[harden_shots.GAME_ID == '0021400003']
fancy_shotchart(harden_game)
plt.figure(figsize=(12,11))
plt.scatter(harden_game.LOC_X, harden_game.LOC_Y,
s=pi*(harden_game.CLOSE_DEF_DIST*10)**2,
alpha = .25, c = harden_game.SHOT_MADE_FLAG,
cmap = plt.cm.RdYlGn)
plt.scatter(harden_game.LOC_X, harden_game.LOC_Y, c='black')
draw_court()
# Descending values along the axis from left to right
plt.xlim(-300,300)
plt.ylim(422.5, -47.5)
def fancy_shots(df):
plt.figure(figsize=(12,11))
plt.scatter(df.LOC_X, df.LOC_Y,
s=pi*(df.CLOSE_DEF_DIST*10)**2,
alpha = .25, c = df.SHOT_MADE_FLAG,
cmap = plt.cm.RdYlGn)
plt.scatter(df.LOC_X, df.LOC_Y, c='black')
draw_court()
# Descending values along the axis from left to right
plt.xlim(-300,300)
plt.ylim(422.5, -47.5)
fancy_shots(harden_shots_full.ix[harden_shots.GAME_ID == '0021400003'])
fancy_shots(harden_shots_full.ix[harden_shots.GAME_ID == '0021400087'])
fancy_shots(harden_shots_full.ix[harden_shots.GAME_ID == '0021400512'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The query in the below box no longer works thanks to the NBA restricting access to the data.
Step2: Wrapping data merge into a function
Step3: Drawing NBA Court to Scale
Step4: Unfortunately, the NBA has blocked access to the data that was used to construct the following shot charts. Prior to about February, they had data that contained very interesting metrics on individual shots. One of those metrics was the proximity of the nearest defender.
|
6,883
|
<ASSISTANT_TASK:>
Python Code:
# Import modules
from __future__ import print_function, division, absolute_import
import numpy as np
#Import citlalatonac from pyaneti_extras, note that pyaneti has to be compiled in your machine
#and pyaneti has to be in your PYTHONPATH, e.g., you have to add in your bashrc file
#export PYTHONPATH=${PYTHONPATH}:/pathtopyaneti/pyaneti
#and replacing pathtopyaneti with the location of pyaneti in your machine
from pyaneti_extras.citlalatonac import citlali
#citlalatonac is the class that creates the spectroscopic-like time-series
#Do all the previous description with one Python command
star = citlali(tmin=0,tmax=50,kernel='QPK',kernel_parameters=[20,0.3,5],
amplitudes=[0.005,0.05,0.05,0.0,0.005,-0.05],time_series=['s2','s3'],seed=13)
#Let us see how the 3 time-series look in the 50 day window we created
star.plot()
#Create the random observation times
t = np.random.uniform(0,50,50)
#Let us create the data at times t
star.create_data(t=t)
#Let us plot where our data points are
star.plot()
#The input vector err has to have one white noise term per each time-series
star.add_white_noise(err=[0.001,0.005,0.010])
star.plot()
fname = 'data_3mdgp.dat'
star.save_data(fname)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: let us summon citlalatonac powers and create synthetic stellar data
Step2: At this point we have a model of the three signals created following the same underlying GP $G(t)$. Now it is time to create data taken at the times $\mathbf{t}$ that we can specify. The times vector $\mathbf{t}$ can be optimised to follow schedule requirements of given targets at different observatories, but for this example, we will just create 50 random times between 0 and 50 days.
Step3: The previous plot shows the positions at which we have created our observations. We still need to add some white noise to make the situation more realistic. We do this by passing an error bar for each time-series that we have created. We will add an error bar of 0.001 for $S_1$ (RV), 0.005 for $S_2$, and 0.010 for $S_3$ as err=[0.001,0.005,0.010].
Step4: Save the file as requested by pyaneti
|
6,884
|
<ASSISTANT_TASK:>
Python Code:
from tax_credit.framework_functions import \
generate_simulated_datasets, distance_comparison, \
test_cross_validated_sequences, \
test_novel_taxa_datasets
from os.path import expandvars, join
import pandas as pd
%matplotlib inline
project_dir = expandvars("../..")
data_dir = join(project_dir, "data")
# List databases as fasta/taxonomy file pairs
databases = {'B1-REF': ['../../data/ref_dbs/gg_13_8_otus/99_otus.fasta',
'../../data/ref_dbs/gg_13_8_otus/99_otu_taxonomy.txt',
"gg_13_8_otus", "GTGCCAGCMGCCGCGGTAA", "GGACTACHVGGGTWTCTAAT", "515f", "806r"],
'F1-REF': ['../../data/ref_dbs/unite_20.11.2016/sh_refs_qiime_ver7_99_20.11.2016_dev.fasta',
'../../data/ref_dbs/unite_20.11.2016/sh_taxonomy_qiime_ver7_99_20.11.2016_dev.txt',
"unite_20.11.2016", "ACCTGCGGARGGATCA", "GAGATCCRTTGYTRAAAGTT", "BITSf", "B58S3r"]
}
# Arrange data set / database info in data frame
simulated_community_definitions = pd.DataFrame.from_dict(databases, orient="index")
simulated_community_definitions.columns = ["Reference file path", "Reference tax path", "Reference id",
"Fwd primer", "Rev primer", "Fwd primer id", "Rev primer id"]
simulated_community_definitions
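# Simulation parameters: amplicon read length, number of simulation iterations, and the minimum read length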
read_length = 250
iterations = 10
min_read_length = 80
generate_simulated_datasets(simulated_community_definitions, data_dir,
read_length, iterations, min_read_length=min_read_length,
levelrange=range(6, 1, -1), force=True)
distance_comparison(simulated_community_definitions, data_dir, 'cross-validated')
distance_comparison(simulated_community_definitions, data_dir, 'novel-taxa-simulations', samples=100)
test_cross_validated_sequences(data_dir)
test_novel_taxa_datasets(data_dir)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we will import these to a dataframe and view it. You should not need to modify the following cell.
Step2: Generate "clean" reference taxonomy and sequence database by removing taxonomy strings with empty or ambiguous levels'
Step3: Data Leakage
Step4: Now check that the novel taxa distance distributions are ok.
Step5: For peace of mind, we can test our novel taxa and simulated community datasets to confirm that
Step6: As a sanity check, confirm that novel taxa were generated successfully.
|
6,885
|
<ASSISTANT_TASK:>
Python Code:
# Arithmetic operations
import numpy as np
from sympy import symbols, lambdify, simplify, limit
k = symbols('k')
s = symbols('s')
# N (grid size) stays symbolic; it cancels out of the operational-intensity ratios below
N = symbols('N')
# 1D stencil
# multiplication addition
AI_dxi = k + 1 + k - 1
AI_dxxi = k + 1 + k - 1
AI_dxxij = 2*k + 2*k-1
# square stencil (all uses the same stencil mask)
# multiplication addition
AI_dxis = k**2 + k**2 - 1
AI_dxxis = k**2 + k**2 - 1
AI_dxxijs = k**2 + k**2 - 1
# I/O operations
# load
IO_dxi = k
IO_dxxi = k
IO_dxxij = 2*k
IO_square = k**2
# Operational intensity in single precision
print(AI_dxi/(4*IO_dxi))
print(AI_dxxi/(4*IO_dxxi))
print(AI_dxxij/(4*IO_dxxij))
print(AI_dxis/(4*IO_square))
print(AI_dxxis/(4*IO_square))
print(AI_dxxijs/(4*IO_square))
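# Turn the symbolic operational intensities into callable functions of the stencil half-width k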
OI_dxi = lambdify(k,AI_dxi/(4*IO_dxi))
OI_dxxi = lambdify(k,AI_dxxi/(4*IO_dxxi))
OI_dxxij = lambdify(k,AI_dxxij/(4*IO_dxxij))
OI_dxis = lambdify(k,AI_dxis/(4*IO_dxxij))
OI_dxxis = lambdify(k,AI_dxxis/(4*IO_dxxij))
OI_dxxijs = lambdify(k,AI_dxxijs/(4*IO_dxxij))
# Arithmetic
# dxi dxxi dxxij multiplications additions duplicates
AI_acou = 0*AI_dxi + 3*AI_dxxi + 0*AI_dxxij + 3 + 5 - 2 * 2
AI_vti = 2 * ( 0*AI_dxi + 3*AI_dxxi + 0*AI_dxxij + 5 + 5 - 2 )
AI_tti = 2 * ( 0*AI_dxi + 3*AI_dxxi + 3*AI_dxxij + 44 + 17 - 8 )
AI_acoums = 0*AI_dxi + 3*s*AI_dxxi + 0*AI_dxxij + 3*s + 5*s - 2 * 2 *s
AI_vtims = 2 * ( 0*AI_dxi + 3*s*AI_dxxi + 0*AI_dxxij + 5*s + 5*s - 2*s )
AI_ttims = 2 * ( 0*AI_dxi + 3*s*AI_dxxi + 3*s*AI_dxxij + 44*s + 17*s - 8*s )
AI_acous = 0*AI_dxis + 3*AI_dxxis + 0*AI_dxxijs + 3 + 5 - 2 * 2
AI_vtis = 2 * ( 0*AI_dxis + 3*AI_dxxis + 0*AI_dxxijs + 5 + 5 - 2 * 2 )
AI_ttis = 2 * ( 0*AI_dxis + 3*AI_dxxis + 3*AI_dxxijs + 44 + 17 - 3*k**2 )
# I/O operations , the full domain of size N is in cache for best case scenario
#
IO_acou = 4 * N
IO_vti = 9 * N
IO_tti = 15 * N
IO_acoums = 3 * s * N + N
IO_vtims = 6 * s * N + 3 * N
IO_ttims = 6 * s * N + 9 * N
IO_acous = 4 * N
IO_vtis = 9 * N
IO_ttis = 15 * N
print(simplify(N*AI_acou/(4*IO_acou)))
print(simplify(N*AI_vti/(4*IO_vti)))
print(simplify(N*AI_tti/(4*IO_tti)))
print(simplify(N*AI_acoums/(4*IO_acoums)))
print(simplify(N*AI_vtims/(4*IO_vtims)))
print(simplify(N*AI_ttims/(4*IO_ttims)))
print(simplify(N*AI_acous/(4*IO_acous)))
print(simplify(N*AI_vtis/(4*IO_vtis)))
print(simplify(N*AI_ttis/(4*IO_ttis)))
OI_acou = lambdify(k,N*AI_acou/(4*IO_acou))
OI_vti = lambdify(k,N*AI_vti/(4*IO_vti))
OI_tti = lambdify(k,N*AI_tti/(4*IO_tti))
OI_acoums = lambdify((k,s),N*AI_acoums/(4*IO_acoums))
OI_vtims = lambdify((k,s),N*AI_vtims/(4*IO_vtims))
OI_ttims = lambdify((k,s),N*AI_ttims/(4*IO_ttims))
OI_acous = lambdify(k,N*AI_acous/(4*IO_acous))
OI_vtis = lambdify(k,N*AI_vtis/(4*IO_vtis))
OI_ttis = lambdify(k,N*AI_ttis/(4*IO_ttis))
print(limit(OI_acou(k),k,51))
print(limit(OI_vti(k),k,51))
print(limit(OI_tti(k),k,51))
print(limit(OI_acoums(3,s),s,128))
print(limit(OI_vtims(3,s),s,128))
print(limit(OI_ttims(3,s),s,128))
print(limit(OI_acous(k),k,51))
print(limit(OI_vtis(k),k,51))
print(limit(OI_ttis(k),k,51))
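# Sweep over stencil sizes k and time-block sizes s and tabulate the operational intensities for plotting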
kk=[3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]
ss=[2,4,8,16,32,64]
OI_wave=np.zeros((15,6))
OI_wavems=np.zeros((15,6,3))
OI=np.zeros((15,3))
for i in range(0,15):
OI_wave[i,0]=OI_acou(kk[i])
OI_wave[i,1]=OI_vti(kk[i])
OI_wave[i,2]=OI_tti(kk[i])
OI_wave[i,3]=OI_acous(kk[i])
OI_wave[i,4]=OI_vtis(kk[i])
OI_wave[i,5]=OI_ttis(kk[i])
OI[i,0]=OI_dxi(kk[i])
OI[i,1]=OI_dxxi(kk[i])
OI[i,2]=OI_dxxij(kk[i])
for j in range(0,6):
OI_wavems[i,j,0]=OI_acoums(kk[i],ss[j])
OI_wavems[i,j,1]=OI_vtims(kk[i],ss[j])
OI_wavems[i,j,2]=OI_ttims(kk[i],ss[j])
import matplotlib.pyplot as plt
fig = plt.figure()
plt.hold("off")
acou = plt.plot(OI_wave[:,0],label='acou') # this is how you'd plot a single line...
vti = plt.plot(OI_wave[:,1],label='vti') # this is how you'd plot a single line...
tti = plt.plot(OI_wave[:,2],label='tti') # this is how you'd plot a single line...
fig = plt.figure()
plt.hold("off")
acou = plt.plot(OI_wave[:,3],label='acous') # this is how you'd plot a single line...
vti = plt.plot(OI_wave[:,4],label='vtis') # this is how you'd plot a single line...
tti = plt.plot(OI_wave[:,5],label='ttis') # this is how you'd plot a single line...
fig = plt.figure()
plt.hold("off")
acou = plt.plot(OI_wavems[:,2,0],label='acous') # this is how you'd plot a single line...
vti = plt.plot(OI_wavems[:,2,1],label='vtis') # this is how you'd plot a single line...
tti = plt.plot(OI_wavems[:,2,2],label='ttis') # this is how you'd plot a single line...
fig = plt.figure()
plt.hold("off")
acou = plt.plot(OI_wavems[:,5,0],label='acous') # this is how you'd plot a single line...
vti = plt.plot(OI_wavems[:,5,1],label='vtis') # this is how you'd plot a single line...
tti = plt.plot(OI_wavems[:,5,2],label='ttis') # this is how you'd plot a single line...
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Operational intensity of wave equations
|
6,886
|
<ASSISTANT_TASK:>
Python Code:
print('Hello, world')
print(10)
print(3.14159)
print(True)
# Print an integer
print(10)
# Print a float
print(3.14159)
# Print a bool
print(True)
print(1 + 1) # this should be 2
print(4 / 2) # this should be 2
print(40 - 38) # this should be 2
print(2^4) # this should be 16
print(2**4) # this should be 16
print('The man in black fled across the desert' + ' and the gunslinger followed.')
# Assign some values to variables.
a = 1
b = 2
c = 2 * a / b - 2*b # Should be -3
print(c)
# Generate some lists and arrays.
example_list = [1, 2, 3]
example_tuple = (1, 2, 3)
list_of_lists = [[1, 2, 3]]
tuple_of_lists = ([1, 2, 3], [200, 2], 1)
mixed_type_list = [1, 2, 'phsyical', 'biology', 10.028]
# Index some values.
print(example_list[0]) # should be 1
print(example_tuple[2]) # should be 3
print(example_list[0:2]) # should be 1 and 2
print(example_list[-1]) # This will give the last entry of the list
# Change the first entry to 1000
example_list[0] = 1000
print(example_list)
example_tuple[0] = 1000
print(example_tuple)
# Do list arithmetic.
next_list = [2, 4, 6]
print(next_list * 2) # Should be [2, 4, 6, 2, 4, 6]
print(next_list * 5) # Should be the same list repeated five times.
# Import the NumPy module and give it an alias
import numpy as np
# Demonstrate the power of numpy.
print(np.exp(-1))
print(np.sin(1))
print(np.cos(2 * np.sin(5)))
# Demonstrate the magic of numpy arrays
my_array = np.array([1, 2, 3])
#Basic arithmetic
print(my_array * 2) # Element-wise multiplication
print(my_array / 3) # Element-wise division
print(my_array * my_array) # Element-wise multiplication
print(np.dot(my_array, my_array)) # Dot product of two arrays
# Create multi-dimensional arrays
one_dimensional_array = np.array([1, 2, 3, 4])
two_dimensional_array = np.array([[1, 2, 3, 4],
[5, 6, 7, 8]])
print(one_dimensional_array)
print(two_dimensional_array)
# Automatically generate series of values.
linear_spaced = np.linspace(0, 10, 1000) # Generates 1000 points between 0 and 10
log_spaced = np.logspace(-1, 1, 200) # Generates 200 points logarithmically spaced
# between .1 and 10
aranged = np.arange(0, 50, 2) # Generates values between 0 and 50 taking steps of
# two.
# Generate some variable with different types.
string_type = 'string'
float_type = 3.14159
bool_type = True
int_type = 3
list_type = ['a', 'b', 2]
tuple_type = ('c', 'd', 3)
nparray_type = np.ones_like(tuple_type)
# Print their types
print(type(string_type))
print(type(float_type))
print(type(bool_type))
print(type(int_type))
print(type(list_type))
print(type(tuple_type))
print(type(nparray_type))
# Change the type of a variable
int_to_float = float(int_type)
tuple_to_list = list(tuple_type)
print(int_to_float)
print(tuple_to_list)
# Convert a float to a string.
value_one = 10
value_two = 20
print("The product of value_one and value_two is " + str(value_one * value_two))
# Print the length, size, and shape of our variables.
big_array = np.array([[1, 2, 3], [4, 5, 6]])
print(len(big_array))
print(np.shape(big_array))
whos
def adding_machine(value_1, value_2, print_value=True):
"""Adds together value_1 with value_2. If `print_value` is True,
the resulting value will be printed to the screen.
"""
# Merge the strings together.
added_values = value_1 + value_2
# Determine if the new string should be printed.
if print_value == True:
print(added_values)
# Return the smashed string.
return added_values
# Add some various values together.
x = adding_machine(10, 2)
y = adding_machine(1.01, 1.08)
z = adding_machine('Python', ' Rules')
# Print the variables we assigned them to.
print(x, y, z)
# Add some various values together.
x = adding_machine(10, 2, print_value=False)
y = adding_machine(1.01, 1.08, print_value=False)
z = adding_machine('Python', ' Rules', print_value=False)
# Print the variables we assigned them to.
print(x, y, z)
# Set the initial number of cells in the experiment.
number_of_cells = 1
# Set the number of division cycles for the experiment.
number_of_divisions = 10
# Set a list of number of cells at division d and start with 1 cell
N_d = [number_of_cells]
# Loop through each division event
for i in range(number_of_divisions):
# Make the cells duplicate
number_of_cells = number_of_cells * 2
# Add the new number of cells to our storage list.
N_d.append(number_of_cells)
# Print the result of our simulation.
print(N_d)
# Load the plotting utility matplotlib with an alias plt.
import matplotlib.pyplot as plt
# The following line is used for this tutorial only and allows plots to be
# displayed in this notebook.
%matplotlib inline
# Establish a vector of the division cycles
division_vector = np.arange(0, number_of_divisions + 1, 1)
# Generate the plot.
plt.plot(division_vector, N_d, 'o', label='simulation')
# Set the axis labels.
plt.xlabel('number of divisions')
plt.ylabel('number of cells')
# Add a legend
plt.legend()
import seaborn as sns
# Generate the plot.
plt.plot(division_vector, N_d, 'o', label='simulation')
# Set the axis labels.
plt.xlabel('number of divisions')
plt.ylabel('number of cells')
# Add a legend
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the above code, we called the print function and passed the text Hello, world. surrounded by single quotation marks ''. The text was passed as an argument to the print function by placing it in the parentheses. The quotation marks determined the type of the argument to be text, known as a string. While we used single quotations in the above code, we could also use double quotations "". These are exchangeable in Python.
Step2: In the above code, it would be nice to be able to add a comment about what the type of each variable is that we are printing, but we don't want this comment to be interpreted by Python. By adding a pound symbol (#) before a line of code, we can force Python to ignore it.
Step3: We see that we get the same result.
Step4: Everything looks good, but what happened at the end? In Python, you exponentiate terms using a double asterisk (**) and not a caret (^). The caret executes a bitwise operation which is completely different from exponentiation.
Step5: Note that the all mathematical operations other than addition (+) can only be done on ints, floats, or bool. The addition operator can be performed on strings too! Let's show this by adding two halves of a sentence together. We'll use the opening sentence from my favorite fiction series The Dark Tower as an example.
Step6: While all of these operations are simple, we would like some way in which can store a number for further use. We can do this by assigning the output of an operation to a variable.
Step7: Lists and tuples
Step8: Note that lists and arrays can have mixed types. Once we have a list or a tuple, we can extract a single value or a range of values by indexing.
Step9: To get the first value of the list, I started with zero. In Python, indexing begins at 0. This is different than in other programming languages such as MATLAB which begins at 1.
Step10: Python yelled at me and told me that I can't assign a current value in a tuple. This is a very important point. For example, if you want to have some process output a series of values you don't want to change, put them in a tuple. Otherwise, put them in a list or an array.
Step11: So how do we do more complicated mathematical operations on a series of numbers? How can I multiply each element of an array by five? How do I take the exponential of each element? How do I perform a dot product between two series? To perform such tasks, we will have to import another python module, NumPy.
Step12: Whenever you write a Python script, you should always import the modules you will need at the very beginning. That way, all of the required packages will be loaded and ready to go when you start loading them.
Step13: With numpy comes a new data type called numpy arrays. These are series of values which are mutable (just like lists) which cannot have a mixed data type (unlike lists and tuples). We can also perform mathematical operations on arrays.
Step14: Numpy arrays are not limited to being one-dimensional. We can create n-dimensional numpy arrays with ease.
Step15: We can even make numpy make series of values for us! This will be very useful once we start manipulating images and making plots.
Step16: It is impossible to perform scientific computing in Python without using numpy.
Step17: Note that even though the elements of our list_type and tuple_type have mixed types, this command tells us what the type of the object is (i.e. list and tuple). We can force changes in the type of a variable with ease. Let's make that int a float and that tuple a list.
Step18: We can also convert numbers to strings. This is useful if we want to print a sentence including the output from some other operation.
Step19: In addition to type, we can get some information about the shape and length of all of our variables. We can do this using the len function and the np.shape function that comes with NumPy.
Step20: We've created a lot of variable so far in this tutorial. It's nice to be able to look at what variables exist in our environment as well as get some information. Let's take a look at everything we've made so far.
Step22: We see that we get a relatively nicely ordered list of all of our variables, the type of the variable, and then information about their contents. We can see for our lists and arrays it tells us the number of rows and columns, how many elements there are, what the type of those elements are and so forth.
Step23: We did a lot of complicated procedures in this code, so let's go through it piece by piece.
Step24: Since we added the automatic printing of values as a keyword argument, we can easily tell our function to stop printing things.
Step25: While this function was very simple (and frankly unnecessary), being able to write functions to automate tasks will be very valuable in the future $-$ especially when we start working with images.
Step26: We covered a lot of syntax in that cell, so let's take a look at it piece by piece.
Step27: It looks like our function worked! But can we tell this is exponential growth? Because this is a simple case, we can see that the number of cells are doubling at each division. However, It would be very useful to plot these results.
Step28: We would like to see if our simulated cells appear to grow exponentially with time. To do so, we would like to plot the number of cells we have at division number $d$ as a function of $d$. Before we do any plotting, let's generate a vector of division times that matches the size of our N_d vector. We can do this using some of the arrangement methods that we used earlier with numpy. Since we had measure at $d=0$, our new division vector must have a length of number_of_divisions + 1.
Step29: Now all that is left is to plot it! We can use the plot function of matplotlib to generate our scatterplot. We'll choose small circles as our markers and provide the approriate $x$ and $y$ labels as we always should. We'll also add a legend to our plot to show that these data poitns are from a simulation.
Step30: In calling the plt.plot() function, we first passed it the $x$ and $y$ data, told it to plot the points as circles (through the 'o' argument), and finally gave it a label.
Step31: Importing this function will likely give you an warning (as is seen above), but this can be ignored. With this imported, let's go ahead and remake the plot from above.
|
6,887
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import print_function
import mdtraj as md
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
traj = md.load('ala2.h5')
traj
pca1 = PCA(n_components=2)
traj.superpose(traj, 0)
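# Flatten each frame's Cartesian coordinates into a single (n_atoms * 3)-dimensional feature vector for PCA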
reduced_cartesian = pca1.fit_transform(traj.xyz.reshape(traj.n_frames, traj.n_atoms * 3))
print(reduced_cartesian.shape)
plt.figure()
plt.scatter(reduced_cartesian[:, 0], reduced_cartesian[:,1], marker='x', c=traj.time)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.title('Cartesian coordinate PCA: alanine dipeptide')
cbar = plt.colorbar()
cbar.set_label('Time [ps]')
pca2 = PCA(n_components=2)
from itertools import combinations
# this python function gives you all unique pairs of elements from a list
atom_pairs = list(combinations(range(traj.n_atoms), 2))
pairwise_distances = md.geometry.compute_distances(traj, atom_pairs)
print(pairwise_distances.shape)
reduced_distances = pca2.fit_transform(pairwise_distances)
plt.figure()
plt.scatter(reduced_distances[:, 0], reduced_distances[:,1], marker='x', c=traj.time)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.title('Pairwise distance PCA: alanine dipeptide')
cbar = plt.colorbar()
cbar.set_label('Time [ps]')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's load up our trajectory. This is the trajectory that we generated in
Step2: Create a two component PCA model, and project our data down into this
Step3: Now we can plot the data on this projection.
Step4: Lets try cross-checking our result by using a different feature space that isn't sensitive to alignment, and instead to "featurize" our trajectory by computing the pairwise distance between every atom in each frame, and using that as our high dimensional input space for PCA.
|
6,888
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import pandas as pd
import numpy as np
import json
import os,sys
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
pd.options.display.float_format = '{:,.2f}'.format
dataset_base_dir = './group_agnostic_fairness/data/law_school/'
dataset_file_name = 'lsac.csv'
file_path = os.path.join(dataset_base_dir,dataset_file_name)
with open(file_path, "r") as file_name:
temp_df = pd.read_csv(file_name)
# Columns of interest
df = temp_df[['zfygpa','zgpa','DOB_yr','parttime','gender','race','tier','fam_inc','lsat','ugpa','pass_bar','index6040']].copy()
renameColumns={'gender':'sex',
'index6040':'weighted_lsat_ugpa',
'fam_inc':'family_income',
'tier':'cluster_tier',
'parttime':'isPartTime'}
target_variable = 'pass_bar'
target_value = 'Passed'
# Renaming columns
df = df.rename(columns = renameColumns)
columns = renameColumns.values()
# NaN in 'pass_bar' refer to dropouts. Considering NaN as failing the bar.
df['pass_bar'] = df['pass_bar'].fillna(value=0.0)
df['pass_bar'] = df.apply(lambda x: 'Passed' if x['pass_bar']==1.0 else 'Failed_or_not_attempted', axis=1).astype('category')
df['zfygpa'] = df['zfygpa'].fillna(value=0.0)
df['zgpa'] = df['zgpa'].fillna(value=0.0)
df['DOB_yr'] = df['DOB_yr'].fillna(value=0.0)
df = df.dropna()
# Binarize the part-time indicator
df['isPartTime'] = df.apply(lambda x: 'Yes' if x['isPartTime']==1.0 else 'No', axis=1).astype('category')
# Process protected-column values
race_dict = {3.0:'Black',7.0:'White'}
sex_dict = {'female':'Female','male':'Male'}
df['race'] = df.apply(lambda x: race_dict[x['race']] if x['race'] in race_dict.keys() else 'Other', axis=1).astype('category')
df['sex'] = df.apply(lambda x: sex_dict[x['sex']] if x['sex'] in sex_dict.keys() else 'Other', axis=1).astype('category')
df.head()
train_df, test_df = train_test_split(df, test_size=0.30, random_state=42)
output_file_path = os.path.join(dataset_base_dir,'train.csv')
with open(output_file_path, mode="w") as output_file:
train_df.to_csv(output_file,index=False,columns=columns,header=False)
output_file.close()
output_file_path = os.path.join(dataset_base_dir,'test.csv')
with open(output_file_path, mode="w") as output_file:
test_df.to_csv(output_file,index=False,columns=columns,header=False)
output_file.close()
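# Inverse-propensity weights: total number of training examples divided by the size of each (race, sex) subgroup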
IPS_example_weights_without_label = {
0: (len(train_df))/(len(train_df[(train_df.race != 'Black') & (train_df.sex != 'Female')])), # 00: White Male
1: (len(train_df))/(len(train_df[(train_df.race != 'Black') & (train_df.sex == 'Female')])), # 01: White Female
2: (len(train_df))/(len(train_df[(train_df.race == 'Black') & (train_df.sex != 'Female')])), # 10: Black Male
3: (len(train_df))/(len(train_df[(train_df.race == 'Black') & (train_df.sex == 'Female')])) # 11: Black Female
}
output_file_path = os.path.join(dataset_base_dir,'IPS_example_weights_without_label.json')
with open(output_file_path, mode="w") as output_file:
output_file.write(json.dumps(IPS_example_weights_without_label))
output_file.close()
print(IPS_example_weights_without_label)
IPS_example_weights_with_label = {
0: (len(train_df))/(len(train_df[(train_df[target_variable] != target_value) & (train_df.race != 'Black') & (train_df.sex != 'Female')])), # 000: Negative White Male
1: (len(train_df))/(len(train_df[(train_df[target_variable] != target_value) & (train_df.race != 'Black') & (train_df.sex == 'Female')])), # 001: Negative White Female
2: (len(train_df))/(len(train_df[(train_df[target_variable] != target_value) & (train_df.race == 'Black') & (train_df.sex != 'Female')])), # 010: Negative Black Male
3: (len(train_df))/(len(train_df[(train_df[target_variable] != target_value) & (train_df.race == 'Black') & (train_df.sex == 'Female')])), # 011: Negative Black Female
4: (len(train_df))/(len(train_df[(train_df[target_variable] == target_value) & (train_df.race != 'Black') & (train_df.sex != 'Female')])), # 100: Positive White Male
5: (len(train_df))/(len(train_df[(train_df[target_variable] == target_value) & (train_df.race != 'Black') & (train_df.sex == 'Female')])), # 101: Positive White Female
6: (len(train_df))/(len(train_df[(train_df[target_variable] == target_value) & (train_df.race == 'Black') & (train_df.sex != 'Female')])), # 110: Positive Black Male
7: (len(train_df))/(len(train_df[(train_df[target_variable] == target_value) & (train_df.race == 'Black') & (train_df.sex == 'Female')])), # 111: Positive Black Female
}
output_file_path = os.path.join(dataset_base_dir,'IPS_example_weights_with_label.json')
with open(output_file_path, mode="w") as output_file:
output_file.write(json.dumps(IPS_example_weights_with_label))
output_file.close()
print(IPS_example_weights_with_label)
cat_cols = train_df.select_dtypes(include='category').columns
vocab_dict = {}
for col in cat_cols:
vocab_dict[col] = list(set(train_df[col].cat.categories))
output_file_path = os.path.join(dataset_base_dir,'vocabulary.json')
with open(output_file_path, mode="w") as output_file:
output_file.write(json.dumps(vocab_dict))
output_file.close()
print(vocab_dict)
temp_dict = train_df.describe().to_dict()
mean_std_dict = {}
for key, value in temp_dict.items():
mean_std_dict[key] = [value['mean'],value['std']]
output_file_path = os.path.join(dataset_base_dir,'mean_std.json')
with open(output_file_path, mode="w") as output_file:
output_file.write(json.dumps(mean_std_dict))
output_file.close()
print(mean_std_dict)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: Processing original dataset
Step3: Shuffle and Split into Train (70%) and Test set (30%)
Step4: Computing inverse propensity weights for each subgroup, and write to directory.
Step5: Construct vocabulary.json, and write to directory.
Step6: Construct mean_std.json, and write to directory
|
6,889
|
<ASSISTANT_TASK:>
Python Code:
def net_force(mass, acceleration):
return mass * acceleration
def net_force(mass, acceleration):
force = mass * acceleration
return force
print(net_force(10,10))
def net_force(mass, acceleration):
    """Calculates f=ma, returns force.
    We assume mass & acceleration are of type int/float."""
return mass * acceleration
def net_force(mass, acceleration):
    """Calculates f=ma, returns force.
    We assume mass & acceleration are of type int/float."""
return mass * acceleration
print(net_force("10", 10))
help(int)
def add_eight_1(x):
    """Takes an integer x, and returns x + 8."""
return x + 8
help(add_eight_1)
def add_eight_2(x):
    # Takes an integer x, and returns x + 8
return x + 8
help(add_eight_2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Literally, this function takes two objects and returns mass * acceleration; but what does that actually mean? And why should we care?
Step3: Defining Force
Step5: Docstrings
Step7: This output might surprise you (and I'll explain why this happens in later lectures). Notice that the function net_force says it expects numbers. We passed in a string and an integer. And so, if we are using the function in an unexpected/unintended way, should we be surprised when things do not work as we expect?
Step9: we can use the "help" command to get information about Python objects. As an implementation detail, "help" will return the docstring of the function. Thus, if we use docstrings then other developers can call 'help' on our functions to find out what they do. Cool right?
|
6,890
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
def split_data(chars, batch_size, num_steps, split_frac=0.9):
    """Split character data into training and validation sets, inputs and targets for each set.

    Arguments
    ---------
    chars: character array
    batch_size: Number of examples in each batch
    num_steps: Number of sequence steps to keep in the input and pass to the network
    split_frac: Fraction of batches to keep in the training set

    Returns train_x, train_y, val_x, val_y
    """
slice_size = batch_size * num_steps
n_batches = int(len(chars) / slice_size)
# Drop the last few characters to make only full batches
x = chars[: n_batches*slice_size]
y = chars[1: n_batches*slice_size + 1]
# Split the data into batch_size slices, then stack them into a 2D matrix
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x n_batches*num_steps
# Split into training and validation sets, keep the virst split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 200)
train_x.shape
train_x[:,:10]
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
learning_rate=0.001, grad_clip=5, sampling=False):
if sampling == True:
batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Declare placeholders we'll feed into the graph
with tf.name_scope('inputs'):
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')
with tf.name_scope('targets'):
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# Build the RNN layers
with tf.name_scope("RNN_layers"):
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
with tf.name_scope("RNN_init_state"):
initial_state = cell.zero_state(batch_size, tf.float32)
# Run the data through the RNN layers
with tf.name_scope("RNN_forward"):
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)
final_state = state
# Reshape output so it's a bunch of rows, one row for each cell output
with tf.name_scope('sequence_reshape'):
seq_output = tf.concat(outputs, axis=1,name='seq_output')
output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')
# Now connect the RNN putputs to a softmax layer and calculate the cost
with tf.name_scope('logits'):
softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),
name='softmax_w')
softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')
logits = tf.matmul(output, softmax_w) + softmax_b
with tf.name_scope('predictions'):
preds = tf.nn.softmax(logits, name='predictions')
with tf.name_scope('cost'):
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')
cost = tf.reduce_mean(loss, name='cost')
# Optimizer for training, using gradient clipping to control exploding gradients
with tf.name_scope('train'):
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
# Export the nodes
export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
'keep_prob', 'cost', 'preds', 'optimizer']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
return graph
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
file_writer = tf.summary.FileWriter('./logs/3', sess.graph)
!mkdir -p checkpoints/anna
epochs = 10
save_every_n = 200
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/anna20.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 0.5,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "checkpoints/anna/i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints/anna')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
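# Hypothetical sanity check (not part of the original notebook): pick_top_n
# should only ever return indices of the top-n most probable characters.
test_preds = np.array([[0.05, 0.4, 0.3, 0.15, 0.05, 0.05]])
print([pick_top_n(test_preds, 6, top_n=2) for _ in range(5)])  # expect only indices 1 and 2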
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use.
Step3: Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
Step4: I'll write another function to grab batches out of the arrays made by split data. Here each batch will be a sliding window on these arrays with size batch_size X num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
Step5: Hyperparameters
Step6: Write out the graph for TensorBoard
Step7: Training
Step8: Sampling
|
6,891
|
<ASSISTANT_TASK:>
Python Code:
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode,iplot
init_notebook_mode(connected=True)
import pandas as pd
df = pd.read_csv('2014_World_Power_Consumption')
df.head()
data = dict(type ='choropleth',
locations=df['Country'],
colorscale = 'Viridis',
reversescale=True,
locationmode= 'country names',
z=df['Power Consumption KWH'],
text=df['Country'],
colorbar = {'title': 'Power Consumption in KiloWatt Hours'})
layout = dict(title='2014 Power Consumption',
geo = dict(showframe=False,projection={'type':'Mercator'}))
choromap = go.Figure(data = [data],layout = layout)
iplot(choromap,validate=False)
df2 = pd.read_csv('2012_Election_Data')
df2.head()
data = dict(type = 'choropleth',
colorscale='Viridis',
locations = df2['State Abv'],
z = df2['Voting-Age Population (VAP)'],
locationmode = 'USA-states',
text = df2['State'],
colorbar = {'title': 'Voting Age Population'})
layout = dict(title = 'Voting Age Population Per State',
geo = dict(scope='usa',showlakes=True,lakecolor='rgb(85,173,240)'))
choromap = go.Figure(data = [data],layout = layout)
iplot(choromap,validate=False)
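# Optional extra (assuming the standard plotly.offline API): write the last map to
# a standalone HTML file instead of rendering it inline.
from plotly.offline import plot
plot(choromap, filename='voting_age_population.html', validate=False)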
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import pandas and read the csv file
Step2: Check the head of the DataFrame.
Step3: Referencing the lecture notes, create a Choropleth Plot of the Power Consumption for Countries using the data and layout dictionary.
Step4: USA Choropleth
Step5: Check the head of the DataFrame.
Step6: Now create a plot that displays the Voting-Age Population (VAP) per state. If you later want to play around with other columns, make sure you consider their data type. VAP has already been transformed to a float for you.
|
6,892
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
width = 3.18 # cm
thickness = 0.46 # cm
length = 48 # cm
volume = length*width*thickness # cm^3
density = 2.77 # g/cm^3
massBar = density*volume
print('mass of bar =', massBar, 'g')
gammaL = 1.88 # from lab procedures (for first harmonic)
dirPath = os.path.realpath('.')
fileName = 'rawData/lab7data.xlsx'
filePath = os.path.join(dirPath, fileName)
df = pd.read_excel(filePath,sheetname="Part2",header=0)
print(df)
cols = df.columns
angularFreq = 2*np.pi*df['Frequency (Hz)']
k = angularFreq**2/(gammaL)**4 * 3*(massBar/1000+4.167*df['Weight']/1000)
print('k average =', np.average(k), 'N/m')
print('k max =', np.max(k), 'N/m')
print('k min =', np.min(k), 'N/m')
print('k std =', np.std(k), 'N/m')
plt.figure(1)
plt.errorbar(df['Weight'], df['Frequency (Hz)'], yerr=2*df['error'], fmt='o')
plt.xlabel('Weight (g)')
plt.ylabel('Frequency (Hz)')
plt.title('Part 2: Dynamic Loading')
plt.grid()
plt.show()
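# Hypothetical cross-check (not part of the original lab analysis): invert the
# formula used above to predict frequencies from the average stiffness k and
# compare them with the measured values.
k_avg = np.average(k)
f_pred = np.sqrt(k_avg * gammaL**4 / (3 * (massBar / 1000 + 4.167 * df['Weight'] / 1000))) / (2 * np.pi)
print(f_pred.head())
print(df['Frequency (Hz)'].head())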
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Constants
Step2: Part 2
Step3: Now let's get to the math.
|
6,893
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
from statsmodels.graphics.gofplots import ProbPlot
plt.style.use('seaborn') # pretty matplotlib plots
plt.rc('font', size=14)
plt.rc('figure', titlesize=18)
plt.rc('axes', labelsize=15)
plt.rc('axes', titlesize=18)
auto = pd.read_csv('../../../../data/ISLR/datasets/Auto.csv', na_values=['?'])
auto.dropna(inplace=True)
auto.reset_index(drop=True, inplace=True)
model_f = 'mpg ~ cylinders + \
displacement + \
horsepower + \
weight + \
acceleration + \
year + \
origin'
model = smf.ols(formula=model_f, data=auto)
model_fit = model.fit()
# fitted values (need a constant term for intercept)
model_fitted_y = model_fit.fittedvalues
# model residuals
model_residuals = model_fit.resid
# normalized residuals
model_norm_residuals = model_fit.get_influence().resid_studentized_internal
# absolute squared normalized residuals
model_norm_residuals_abs_sqrt = np.sqrt(np.abs(model_norm_residuals))
# absolute residuals
model_abs_resid = np.abs(model_residuals)
# leverage, from statsmodels internals
model_leverage = model_fit.get_influence().hat_matrix_diag
# cook's distance, from statsmodels internals
model_cooks = model_fit.get_influence().cooks_distance[0]
plot_lm_1 = plt.figure(1)
plot_lm_1.set_figheight(8)
plot_lm_1.set_figwidth(12)
plot_lm_1.axes[0] = sns.residplot(model_fitted_y, 'mpg', data=auto,
lowess=True,
scatter_kws={'alpha': 0.5},
line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8})
plot_lm_1.axes[0].set_title('Residuals vs Fitted')
plot_lm_1.axes[0].set_xlabel('Fitted values')
plot_lm_1.axes[0].set_ylabel('Residuals')
# annotations
abs_resid = model_abs_resid.sort_values(ascending=False)
abs_resid_top_3 = abs_resid[:3]
for i in abs_resid_top_3.index:
plot_lm_1.axes[0].annotate(i,
xy=(model_fitted_y[i],
model_residuals[i]));
QQ = ProbPlot(model_norm_residuals)
plot_lm_2 = QQ.qqplot(line='45', alpha=0.5, color='#4C72B0', lw=1)
plot_lm_2.set_figheight(8)
plot_lm_2.set_figwidth(12)
plot_lm_2.axes[0].set_title('Normal Q-Q')
plot_lm_2.axes[0].set_xlabel('Theoretical Quantiles')
plot_lm_2.axes[0].set_ylabel('Standardized Residuals');
# annotations
abs_norm_resid = np.flip(np.argsort(np.abs(model_norm_residuals)), 0)
abs_norm_resid_top_3 = abs_norm_resid[:3]
for r, i in enumerate(abs_norm_resid_top_3):
plot_lm_2.axes[0].annotate(i,
xy=(np.flip(QQ.theoretical_quantiles, 0)[r],
model_norm_residuals[i]));
plot_lm_3 = plt.figure(3)
plot_lm_3.set_figheight(8)
plot_lm_3.set_figwidth(12)
plt.scatter(model_fitted_y, model_norm_residuals_abs_sqrt, alpha=0.5)
sns.regplot(model_fitted_y, model_norm_residuals_abs_sqrt,
scatter=False,
ci=False,
lowess=True,
line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8})
plot_lm_3.axes[0].set_title('Scale-Location')
plot_lm_3.axes[0].set_xlabel('Fitted values')
plot_lm_3.axes[0].set_ylabel('$\sqrt{|Standardized Residuals|}$');
# annotations
abs_sq_norm_resid = np.flip(np.argsort(model_norm_residuals_abs_sqrt), 0)
abs_sq_norm_resid_top_3 = abs_sq_norm_resid[:3]
for i in abs_norm_resid_top_3:
plot_lm_3.axes[0].annotate(i,
xy=(model_fitted_y[i],
model_norm_residuals_abs_sqrt[i]));
plot_lm_4 = plt.figure(4)
plot_lm_4.set_figheight(8)
plot_lm_4.set_figwidth(12)
plt.scatter(model_leverage, model_norm_residuals, alpha=0.5)
sns.regplot(model_leverage, model_norm_residuals,
scatter=False,
ci=False,
lowess=True,
line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8})
plot_lm_4.axes[0].set_xlim(0, 0.20)
plot_lm_4.axes[0].set_ylim(-3, 5)
plot_lm_4.axes[0].set_title('Residuals vs Leverage')
plot_lm_4.axes[0].set_xlabel('Leverage')
plot_lm_4.axes[0].set_ylabel('Standardized Residuals')
# annotations
leverage_top_3 = np.flip(np.argsort(model_cooks), 0)[:3]
for i in leverage_top_3:
plot_lm_4.axes[0].annotate(i,
xy=(model_leverage[i],
model_norm_residuals[i]))
# shenanigans for cook's distance contours
def graph(formula, x_range, label=None):
x = x_range
y = formula(x)
plt.plot(x, y, label=label, lw=1, ls='--', color='red')
p = len(model_fit.params) # number of model parameters
graph(lambda x: np.sqrt((0.5 * p * (1 - x)) / x),
np.linspace(0.001, 0.200, 50),
'Cook\'s distance') # 0.5 line
graph(lambda x: np.sqrt((1 * p * (1 - x)) / x),
np.linspace(0.001, 0.200, 50)) # 1 line
plt.legend(loc='upper right');
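# Hypothetical follow-up (not part of the original analysis): list the three
# observations with the largest Cook's distance together with their leverage.
top_influential = np.argsort(model_cooks)[-3:]
print(auto.iloc[top_influential][['mpg', 'horsepower', 'weight']])
print(model_cooks[top_influential], model_leverage[top_influential])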
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the data, and getting rid of NAs
Step2: The fitted linear regression model, using statsmodels R style formula API
Step3: Calculations required for some of the plots
Step4: And now, the actual plots
Step5: 2. QQ plot
Step6: 3. Scale-Location Plot
Step7: 4. Leverage plot
|
6,894
|
<ASSISTANT_TASK:>
Python Code::
import tensorflow_datasets as tfds
ds, meta = tfds.load('citrus_leaves', with_info=True, split='train', shuffle_files=True)
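# A minimal usage sketch (assuming the standard tfds API and that this dataset
# exposes 'image' and 'label' features): inspect the metadata and one example.
print(meta.features)
for example in ds.take(1):
    print(example['image'].shape, example['label'])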
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
6,895
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
%matplotlib inline
import matplotlib.pyplot as plt
import openpathsampling as paths
old_storage = paths.Storage("ad_tps_equil.nc", "r")
engine = old_storage.engines['300K']
C_7eq = old_storage.volumes['C_7eq']
alpha_R = old_storage.volumes['alpha_R']
traj = old_storage.samplesets[-1][0].trajectory
# load CVs for plotting
phi = old_storage.cvs['phi']
psi = old_storage.cvs['psi']
network = paths.FixedLengthTPSNetwork(C_7eq, alpha_R,
length=400).named('fixed_tps_network')
trajectories = []
i=0
while len(trajectories) == 0 and i < 5:
max_len = 200 + i*50
fwd_traj = engine.generate(traj[-1], [lambda traj, foo: len(traj) < max_len])
bkwd_traj = engine.generate(traj[0], [lambda traj, foo: len(traj) < max_len], direction=-1)
new_traj = bkwd_traj[:-1] + traj + fwd_traj[1:]
trajectories = network.sampling_ensembles[0].split(new_traj)
print(trajectories)
# raises an error if we still haven't found a suitable trajectory
trajectory = trajectories[0]
plt.plot(phi(trajectory), psi(trajectory), '.k')
plt.plot(phi(traj), psi(traj))
storage = paths.Storage("ad_fixed_tps_traj.nc", "w")
storage.save(trajectory)
storage.save(network)
storage.save(engine); # not technically required, saved with trajectory
storage.close()
old_storage.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading from storage
Step2: Building a trajectory to suit the ensemble
Step3: Plot the trajectory
Step4: Save stuff
|
6,896
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
plt.style.use('seaborn-whitegrid')
def f(x):
The function to predict.
return x * np.sin(x)
def make_data(N, rseed=1):
np.random.seed(rseed)
# Create some observations with noise
X = np.random.uniform(low=0.1, high=9.9, size=N)
X = np.atleast_2d(X).T
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
return X, y, dy
X, y, dy = make_data(20)
# Get the master version of scikit-learn; new GP code isn't in release
# This needs to compile things, so it will take a while...
# Uncomment the following:
# !pip install git+git://github.com/scikit-learn/scikit-learn.git
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF as SquaredExponential
from sklearn.gaussian_process.kernels import ConstantKernel as Amplitude
# Define the kernel: an amplitude (constant) term times a squared-exponential term
kernel = Amplitude(1.0, (1E-3, 1E3)) * SquaredExponential(10, (1e-2, 1e2))
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel,
alpha=(dy / y)**2, # fractional errors in data
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the hyper-parameters
gp.fit(X, y)
gp.kernel_
# note: gp.kernel is the initial kernel
# gp.kernel_ (with an underscore) is the fitted kernel
gp.kernel_.get_params()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x_pred = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x_pred, return_std=True)
def plot_results(X, y, dy, x_pred, y_pred, sigma):
fig = plt.figure(figsize=(8, 6))
plt.plot(x_pred, f(x_pred), 'k:', label=u'$f(x) = x\,\sin(x)$')
plt.errorbar(X.ravel(), y, dy, fmt='k.', markersize=10, label=u'Observations',
ecolor='gray')
plt.plot(x_pred, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x_pred, x_pred[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.3, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left');
plot_results(X, y, dy, x_pred, y_pred, sigma)
import scipy.linalg
KXX = gp.kernel_(X)
A = KXX + np.diag((dy/y)**2.)
L = scipy.linalg.cholesky(A, lower=True)
KXXp = gp.kernel_(x_pred, X)
KXpXp = gp.kernel_(x_pred)
alpha = scipy.linalg.cho_solve((L, True), y)
y_pred = np.dot(KXXp, alpha) + np.mean(y, axis=0)
v = scipy.linalg.cho_solve((L, True), KXXp.T)
y_pred_fullcov = KXpXp - KXXp.dot(v)
sigma = np.sqrt(np.diag(y_pred_fullcov))
plot_results(X, y, dy, x_pred, y_pred, sigma)
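# Hypothetical extension (not in the original notebook): since we kept the full
# posterior covariance, we can draw a few sample functions from the GP posterior.
# np.random.multivariate_normal may warn if the covariance is not perfectly
# positive semi-definite; that is expected with finite-precision kernels.
posterior_draws = np.random.multivariate_normal(y_pred, y_pred_fullcov, size=3)
plt.figure(figsize=(8, 6))
plt.plot(x_pred, posterior_draws.T, lw=1, alpha=0.6)
plt.plot(x_pred, y_pred, 'b-', label='posterior mean')
plt.legend(loc='upper left');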
import GPy
kernels = [GPy.kern.RBF(input_dim=1),
GPy.kern.Brownian(input_dim=1),
GPy.kern.Matern32(input_dim=1),
GPy.kern.Matern52(input_dim=1),
GPy.kern.ExpQuad(input_dim=1),
GPy.kern.Cosine(input_dim=1)]
names = ['Gaussian', 'Brownian', 'Mattern32', 'Matern52', 'ExpQuad', 'Cosine']
fig, axs = plt.subplots(3, 2, figsize=(12, 12), sharex=True, sharey=True)
axs = axs.ravel()
for i, k in enumerate(kernels):
m = GPy.models.GPRegression(X, y[:,None], kernel=k)
m.optimize()
m.plot_f(ax=axs[i], plot_data=True, samples=4, legend=False, plot_limits=[0, 10])
# plotting four samples of the GP posterior too
axs[i].errorbar(X, y, yerr=dy, fmt="o", c='k')
axs[i].set_title(names[i])
axs[i].plot(x_pred, f(x_pred), 'k:', label=u'$f(x) = x\,\sin(x)$')
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Make Some Data
Step3: Gaussian Process Regression with Scikit-Learn
Step4: Gaussian Processes by-hand
Step5: Quick kernel comparison with GPy
|
6,897
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import pandas as pd
import numpy as np
import linecache, bisect
import os
spath = os.getcwd()
path = spath.split("/")
date = path[len(path)-1]
def makeGraphqlRequest(query, variables):
return GraphQLClient.request(query, variables)
suspicious_query = """query($date:SpotDateType) {
dns {
suspicious(date:$date)
{ clientIp
clientIpSev
dnsQuery
dnsQueryClass
dnsQueryClassLabel
dnsQueryRcode
dnsQueryRcodeLabel
dnsQueryRep
dnsQuerySev
dnsQueryType
dnsQueryTypeLabel
frameLength
frameTime
networkContext
score
tld
unixTimestamp
}
}
}"""
##If you want to use a different date for your query, switch the
##commented/uncommented following lines
variables={
'date': datetime.datetime.strptime(date, '%Y%m%d').strftime('%Y-%m-%d')
# 'date': "2016-10-08"
}
suspicious_request = makeGraphqlRequest(suspicious_query,variables)
##The variable suspicious_request will contain the resulting data from the query.
results = suspicious_request['data']['dns']['suspicious']
df = pd.read_json(json.dumps(results))
##Printing only the selected column list from the dataframe
##Unless specified otherwise,
print df[['clientIp', 'unixTimestamp','tld', 'dnsQuery','dnsQueryRcode','dnsQueryRcodeLabel']]
##Filter results where the destination port = 3389
##The resulting data will be stored in df2
df2 = df[df['tld'].isin(['sjc04-login.dotomi.com'])]
print df2[['clientIp', 'unixTimestamp','tld', 'dnsQuery','dnsQueryRcode','dnsQueryRcodeLabel']]
srtd = df.sort_values(by="tld")
print srtd[['clientIp', 'unixTimestamp','tld', 'dnsQuery','dnsQueryRcode','dnsQueryRcodeLabel']]
## This command will group the results by pairs of source-destination IP
## summarizing all other columns
grpd = df.groupby(['clientIp','tld']).count()
## This will print the resulting dataframe displaying the input and output bytes columnns
print grpd[["dnsQuery"]]
# reset_scores = mutation($date:SpotDateType!) {
# dns{
# resetScoredConnections(date:$date){
# success
# }
# }
# }
# variables={
# 'date': datetime.datetime.strptime(date, '%Y%m%d').strftime('%Y-%m-%d')
# }
# request = makeGraphqlRequest(reset_scores,variables)
# print request['data']['dns']['resetScoredConnections']['success']
#Your code here
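## Hypothetical example for the sandbox (pandas only, not part of Apache Spot):
## convert the unix timestamp (assumed to be in seconds) into a new helper column
## and count suspicious queries per client IP.
df['frameDatetime'] = pd.to_datetime(df['unixTimestamp'], unit='s')
print df.groupby('clientIp')['dnsQuery'].count().sort_values(ascending=False).head()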
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Request Data
Step3: Now that we have a function, we can run a query like this
Step4: Pandas Dataframes
Step5: Additional operations
Step6: Ordering the data
Step7: Grouping the data
Step9: Reset Scored Connections
Step10: Sandbox
|
6,898
|
<ASSISTANT_TASK:>
Python Code:
import pickle
import pprint
data = [{'a': 'A', 'b': 2, 'c': 3.0}]
print('DATA:', end=' ')
pprint.pprint(data)
data_string = pickle.dumps(data)
print('PICKLE: {!r}'.format(data_string))
import pickle
import pprint
data1 = [{'a': 'A', 'b': 2, 'c': 3.0}]
print('BEFORE: ', end=' ')
pprint.pprint(data1)
data1_string = pickle.dumps(data1)
data2 = pickle.loads(data1_string)
print('AFTER : ', end=' ')
pprint.pprint(data2)
print('SAME? :', (data1 is data2))
print('EQUAL?:', (data1 == data2))
import io
import pickle
import pprint
class SimpleObject:
def __init__(self, name):
self.name = name
self.name_backwards = name[::-1]
return
data = []
data.append(SimpleObject('pickle'))
data.append(SimpleObject('preserve'))
data.append(SimpleObject('last'))
# Simulate a file.
out_s = io.BytesIO()
# Write to the stream
for o in data:
print('WRITING : {} ({})'.format(o.name, o.name_backwards))
pickle.dump(o, out_s)
out_s.flush()
# Set up a read-able stream
in_s = io.BytesIO(out_s.getvalue())
# Read the data
while True:
try:
o = pickle.load(in_s)
except EOFError:
break
else:
print('READ : {} ({})'.format(
o.name, o.name_backwards))
import pickle
import sys
class SimpleObject:
def __init__(self, name):
self.name = name
l = list(name)
l.reverse()
self.name_backwards = ''.join(l)
data = []
data.append(SimpleObject('pickle'))
data.append(SimpleObject('preserve'))
data.append(SimpleObject('last'))
filename ='test.dat'
with open(filename, 'wb') as out_s:
for o in data:
print('WRITING: {} ({})'.format(
o.name, o.name_backwards))
pickle.dump(o, out_s)
with open(filename, 'rb') as in_s:
while True:
try:
o = pickle.load(in_s)
except EOFError:
break
else:
print('READ: {} ({})'.format(
o.name, o.name_backwards))
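# Hedged aside (not part of the original example): the pickle protocol version
# changes the byte stream; newer protocols are typically more compact.
v0 = pickle.dumps(data, protocol=0)
v_hi = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
print('protocol 0:', len(v0), 'bytes; highest protocol:', len(v_hi), 'bytes')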
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Working with Stream
Step2: Problem with Reconstructing Objects
|
6,899
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%load_ext autoreload
%autoreload 2
from lxmls import DATA_PATH
import lxmls
import lxmls.sequences.crf_online as crfo
import lxmls.readers.pos_corpus as pcc
import lxmls.sequences.id_feature as idfc
import lxmls.sequences.extended_feature as exfc
from lxmls.readers import pos_corpus
corpus = lxmls.readers.pos_corpus.PostagCorpus()
train_seq = corpus.read_sequence_list_conll(DATA_PATH + "/train-02-21.conll",
max_sent_len=10, max_nr_sent=1000)
test_seq = corpus.read_sequence_list_conll(DATA_PATH + "/test-23.conll",
max_sent_len=10, max_nr_sent=1000)
dev_seq = corpus.read_sequence_list_conll(DATA_PATH + "/dev-22.conll",
max_sent_len=10, max_nr_sent=1000)
print("There are", len(train_seq), "examples in train_seq")
print("First example:", train_seq[0])
## Building features
feature_mapper = idfc.IDFeatures(train_seq)
feature_mapper.build_features()
len(feature_mapper.feature_list)
## Let's see the features for the first training example
feature_mapper.feature_list[0]
## The previous features can be classified into:
print("\nInitial features:", feature_mapper.feature_list[0][0])
print("\nTransition features:", feature_mapper.feature_list[0][1])
print("\nFinal features:", feature_mapper.feature_list[0][2])
print("\nEmission features:", feature_mapper.feature_list[0][3])
len(train_seq), len(feature_mapper.feature_list)
inv_feature_dict = {word: pos for pos, word in feature_mapper.feature_dict.items()}
feature_mapper.feature_list[0][0]
[inv_feature_dict[x[0]] for x in feature_mapper.feature_list[0][0]]
[inv_feature_dict[x[0]] for x in feature_mapper.feature_list[0][1]]
[inv_feature_dict[x[0]] for x in feature_mapper.feature_list[0][2]]
len(train_seq.x_dict)
## Train crf
crf_online = crfo.CRFOnline(corpus.word_dict, corpus.tag_dict, feature_mapper)
crf_online.num_epochs = 20
crf_online.train_supervised(train_seq)
## You will receive feedback when each epoch is finished,
## note that running the 20 epochs might take a while. After training is done,
## evaluate the learned model on the training, development and test sets.
pred_train = crf_online.viterbi_decode_corpus(train_seq)
pred_dev = crf_online.viterbi_decode_corpus(dev_seq)
pred_test = crf_online.viterbi_decode_corpus(test_seq)
eval_train = crf_online.evaluate_corpus(train_seq, pred_train)
eval_dev = crf_online.evaluate_corpus(dev_seq, pred_dev)
eval_test = crf_online.evaluate_corpus(test_seq, pred_test)
print("CRF - ID Features Accuracy Train: %.3f Dev: %.3f Test: %.3f" \
%(eval_train,eval_dev, eval_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data from the conll task
Step2: Feature generation
Step3: About feature_mappers
Step4: An observation on the features for a given example
Step5: Codification of the features
Step6: Train a crf
|