| Unnamed: 0 (int64, 0–15.9k) | cleaned_code (string, lengths 67–124k, nullable) | cleaned_prompt (string, lengths 168–30.3k, nullable) |
|---|---|---|
6,200
|
<ASSISTANT_TASK:>
Python Code:
# imports used by the cells below
import math
import multiprocessing
import shutil
from datetime import datetime

import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.feature_column import feature_column

MODEL_NAME = 'reg-model-01'
TRAIN_DATA_FILE = 'data/train-data.csv'
VALID_DATA_FILE = 'data/valid-data.csv'
TEST_DATA_FILE = 'data/test-data.csv'
RESUME_TRAINING = False
PROCESS_FEATURES = True
MULTI_THREADING = False
HEADER = ['key','x','y','alpha','beta','target']
HEADER_DEFAULTS = [[0], [0.0], [0.0], ['NA'], ['NA'], [0.0]]
NUMERIC_FEATURE_NAMES = ['x', 'y']
CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {'alpha':['ax01', 'ax02'], 'beta':['bx01', 'bx02']}
CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.keys())
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES
TARGET_NAME = 'target'
UNUSED_FEATURE_NAMES = list(set(HEADER) - set(FEATURE_NAMES) - {TARGET_NAME})
print("Header: {}".format(HEADER))
print("Numeric Features: {}".format(NUMERIC_FEATURE_NAMES))
print("Categorical Features: {}".format(CATEGORICAL_FEATURE_NAMES))
print("Target: {}".format(TARGET_NAME))
print("Unused Features: {}".format(UNUSED_FEATURE_NAMES))
def process_dataframe(dataset_df):
    dataset_df["x_2"] = np.square(dataset_df['x'])
    dataset_df["y_2"] = np.square(dataset_df['y'])
    dataset_df["xy"] = dataset_df['x'] * dataset_df['y']
    dataset_df['dist_xy'] = np.sqrt(np.square(dataset_df['x'] - dataset_df['y']))
    return dataset_df
def generate_pandas_input_fn(file_name, mode=tf.estimator.ModeKeys.EVAL,
                             skip_header_lines=0,
                             num_epochs=1,
                             batch_size=100):
    df_dataset = pd.read_csv(file_name, names=HEADER, skiprows=skip_header_lines)
    x = df_dataset[FEATURE_NAMES].copy()
    if PROCESS_FEATURES:
        x = process_dataframe(x)
    y = df_dataset[TARGET_NAME]
    shuffle = (mode == tf.estimator.ModeKeys.TRAIN)
    num_threads = 1
    if MULTI_THREADING:
        num_threads = multiprocessing.cpu_count()
        num_epochs = int(num_epochs / num_threads) if mode == tf.estimator.ModeKeys.TRAIN else num_epochs
    pandas_input_fn = tf.estimator.inputs.pandas_input_fn(
        batch_size=batch_size,
        num_epochs=num_epochs,
        shuffle=shuffle,
        x=x,
        y=y,
        num_threads=num_threads,
        target_column=TARGET_NAME
    )
    print("")
    print("* data input_fn:")
    print("================")
    print("Input file: {}".format(file_name))
    print("Dataset size: {}".format(len(df_dataset)))
    print("Batch size: {}".format(batch_size))
    print("Epoch Count: {}".format(num_epochs))
    print("Mode: {}".format(mode))
    print("Thread Count: {}".format(num_threads))
    print("Shuffle: {}".format(shuffle))
    print("================")
    print("")
    return pandas_input_fn
features, target = generate_pandas_input_fn(file_name=TRAIN_DATA_FILE)()
print("Feature read from DataFrame: {}".format(list(features.keys())))
print("Target read from DataFrame: {}".format(target))
def get_feature_columns():
    # copy so repeated calls do not mutate the global NUMERIC_FEATURE_NAMES
    all_numeric_feature_names = list(NUMERIC_FEATURE_NAMES)
    CONSTRUCTED_NUMERIC_FEATURES_NAMES = ['x_2', 'y_2', 'xy', 'dist_xy']
    if PROCESS_FEATURES:
        all_numeric_feature_names += CONSTRUCTED_NUMERIC_FEATURES_NAMES
    numeric_columns = {feature_name: tf.feature_column.numeric_column(feature_name)
                       for feature_name in all_numeric_feature_names}
    categorical_column_with_vocabulary = \
        {item[0]: tf.feature_column.categorical_column_with_vocabulary_list(item[0], item[1])
         for item in CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.items()}
    feature_columns = {}
    feature_columns.update(numeric_columns)
    feature_columns.update(categorical_column_with_vocabulary)
    # add extended features (crossing, bucketization, embedding)
    feature_columns['alpha_X_beta'] = tf.feature_column.crossed_column(
        [feature_columns['alpha'], feature_columns['beta']], 4)
    return feature_columns
feature_columns = get_feature_columns()
print("Feature Columns: {}".format(feature_columns))
def create_estimator(run_config, hparams):
    feature_columns = list(get_feature_columns().values())
    dense_columns = list(
        filter(lambda column: isinstance(column, feature_column._NumericColumn),
               feature_columns))
    categorical_columns = list(
        filter(lambda column: isinstance(column, (feature_column._VocabularyListCategoricalColumn,
                                                  feature_column._BucketizedColumn)),
               feature_columns))
    indicator_columns = list(
        map(lambda column: tf.feature_column.indicator_column(column),
            categorical_columns))
    estimator_feature_columns = dense_columns + indicator_columns
    estimator = tf.estimator.DNNRegressor(
        feature_columns=estimator_feature_columns,
        hidden_units=hparams.hidden_units,
        optimizer=tf.train.AdamOptimizer(),
        activation_fn=tf.nn.elu,
        dropout=hparams.dropout_prob,
        config=run_config
    )
    print("")
    print("Estimator Type: {}".format(type(estimator)))
    print("")
    return estimator
hparams = tf.contrib.training.HParams(
num_epochs = 100,
batch_size = 500,
hidden_units=[8, 4],
dropout_prob = 0.0)
model_dir = 'trained_models/{}'.format(MODEL_NAME)
run_config = tf.estimator.RunConfig().replace(model_dir=model_dir)
print("Model directory: {}".format(run_config.model_dir))
print("Hyper-parameters: {}".format(hparams))
estimator = create_estimator(run_config, hparams)
train_input_fn = generate_pandas_input_fn(file_name= TRAIN_DATA_FILE,
mode=tf.estimator.ModeKeys.TRAIN,
num_epochs=hparams.num_epochs,
batch_size=hparams.batch_size)
if not RESUME_TRAINING:
    shutil.rmtree(model_dir, ignore_errors=True)
tf.logging.set_verbosity(tf.logging.INFO)
time_start = datetime.utcnow()
print("Estimator training started at {}".format(time_start.strftime("%H:%M:%S")))
print(".......................................")
estimator.train(input_fn = train_input_fn)
time_end = datetime.utcnow()
print(".......................................")
print("Estimator training finished at {}".format(time_end.strftime("%H:%M:%S")))
print("")
time_elapsed = time_end - time_start
print("Estimator training elapsed time: {} seconds".format(time_elapsed.total_seconds()))
TEST_SIZE = 5000
test_input_fn = generate_pandas_input_fn(file_name=TEST_DATA_FILE,
mode= tf.estimator.ModeKeys.EVAL,
batch_size= TEST_SIZE)
results = estimator.evaluate(input_fn=test_input_fn)
print("")
print(results)
rmse = round(math.sqrt(results["average_loss"]),5)
print("")
print("RMSE: {}".format(rmse))
import itertools
predict_input_fn = generate_pandas_input_fn(file_name=TEST_DATA_FILE,
mode= tf.estimator.ModeKeys.PREDICT,
batch_size= 5)
predictions = estimator.predict(input_fn=predict_input_fn)
values = list(map(lambda item: item["predictions"][0],list(itertools.islice(predictions, 5))))
print()
print("Predicted Values: {}".format(values))
def process_features(features):
    features["x_2"] = tf.square(features['x'])
    features["y_2"] = tf.square(features['y'])
    features["xy"] = tf.multiply(features['x'], features['y'])
    features['dist_xy'] = tf.sqrt(tf.squared_difference(features['x'], features['y']))
    return features

def csv_serving_input_fn():
    SERVING_HEADER = ['x', 'y', 'alpha', 'beta']
    SERVING_HEADER_DEFAULTS = [[0.0], [0.0], ['NA'], ['NA']]
    rows_string_tensor = tf.placeholder(dtype=tf.string,
                                        shape=[None],
                                        name='csv_rows')
    receiver_tensor = {'csv_rows': rows_string_tensor}
    row_columns = tf.expand_dims(rows_string_tensor, -1)
    columns = tf.decode_csv(row_columns, record_defaults=SERVING_HEADER_DEFAULTS)
    features = dict(zip(SERVING_HEADER, columns))
    if PROCESS_FEATURES:
        features = process_features(features)
    return tf.estimator.export.ServingInputReceiver(
        features, receiver_tensor)
export_dir = model_dir + "/export"
estimator.export_savedmodel(
export_dir_base = export_dir,
serving_input_receiver_fn = csv_serving_input_fn,
as_text=True
)
import os
saved_model_dir = os.path.join(export_dir, sorted(os.listdir(path=export_dir))[-1])  # pick the latest timestamped export
print(saved_model_dir)
predictor_fn = tf.contrib.predictor.from_saved_model(
export_dir = saved_model_dir,
signature_def_key="predict"
)
output = predictor_fn({'csv_rows': ["0.5,1,ax01,bx02", "-0.5,-1,ax02,bx02"]})
print(output)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Define Dataset Metadata
Step2: 2. Define Data Input Function
Step3: 3. Define Feature Columns
Step4: 4. Create an Estimator
Step5: b. Set hyper-parameter values (HParams)
Step6: c. Instantiate the estimator
Step7: 5. Train the Estimator
Step8: 6. Evaluate the Model
Step9: 7. Prediction
Step10: 8. Save & Serve the Model
Step11: b. Export SavedModel
Step12: c. Serve the Saved Model
|
6,201
|
<ASSISTANT_TASK:>
Python Code:
import os.path as op
import numpy as np
from mayavi import mlab
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.simulation import simulate_stc, simulate_evoked
seed = 42
# parameters for inverse method
method = 'sLORETA'
snr = 3.
lambda2 = 1.0 / snr ** 2
# signal simulation parameters
# do not add extra noise to the known signals
nave = np.inf
T = 100
times = np.linspace(0, 1, T)
dt = times[1] - times[0]
# Paths to MEG data
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-fwd.fif')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
'sample_audvis-ave.fif')
fwd = mne.read_forward_solution(fname_fwd)
fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True,
use_cps=False)
fwd['info']['bads'] = []
inv_op = read_inverse_operator(fname_inv)
raw = mne.io.RawFIF(op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw.fif'))
events = mne.find_events(raw)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
epochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True)
epochs.info['bads'] = []
evoked = epochs.average()
labels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir)
label_names = [l.name for l in labels]
n_labels = len(labels)
cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
# The known signal is all zeros outside of the two labels of interest
signal = np.zeros((n_labels, T))
idx = label_names.index('inferiorparietal-lh')
signal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times)
idx = label_names.index('rostralmiddlefrontal-rh')
signal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times)
hemi_to_ind = {'lh': 0, 'rh': 1}
for i, label in enumerate(labels):
    # The `center_of_mass` function needs labels to have values.
    labels[i].values.fill(1.)
    # Restrict the eligible vertices to be those on the surface under
    # consideration and within the label.
    surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno']
    restrict_verts = np.intersect1d(surf_vertices, label.vertices)
    com = labels[i].center_of_mass(subject='sample',
                                   subjects_dir=subjects_dir,
                                   restrict_vertices=restrict_verts,
                                   surf='white')
    # Convert the center-of-mass vertex index from the surface vertex list to
    # the Label's vertex list.
    cent_idx = np.where(label.vertices == com)[0][0]
    # Create a mask with 1 at the center vertex and zeros elsewhere.
    labels[i].values.fill(0.)
    labels[i].values[cent_idx] = 1.
stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt,
value_fun=lambda x: x)
kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4,
time_unit='s', initial_time=0.05, size=1200,
views=['lat', 'med'])
clim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7])
figs = [mlab.figure(1), mlab.figure(2), mlab.figure(3), mlab.figure(4)]
brain_gen = stc_gen.plot(clim=clim, figure=figs, **kwargs)
evoked_gen = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave,
random_state=seed)
# Map the simulated sensor-space data to source-space using the inverse
# operator.
stc_inv = apply_inverse(evoked_gen, inv_op, lambda2, method=method)
figs = [mlab.figure(5), mlab.figure(6), mlab.figure(7), mlab.figure(8)]
brain_inv = stc_inv.plot(figure=figs, **kwargs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we set some parameters.
Step2: Load the MEG data
Step3: Estimate the background noise covariance from the baseline period
Step4: Generate sinusoids in two spatially distant labels
Step5: Find the center vertices in source space of each label
Step6: Create source-space data with known signals
Step7: Plot original signals
Step8: Simulate sensor-space signals
Step9: Plot the point-spread of corrupted signal
|
6,202
|
<ASSISTANT_TASK:>
Python Code:
# system functions that are always useful to have
import time, sys, os
# basic numeric setup
import numpy as np
import math
# inline plotting
%matplotlib inline
# plotting
import matplotlib
from matplotlib import pyplot as plt
# seed the random number generator
rstate = np.random.default_rng(715)
# re-defining plotting defaults
from matplotlib import rcParams
rcParams.update({'xtick.major.pad': '7.0'})
rcParams.update({'xtick.major.size': '7.5'})
rcParams.update({'xtick.major.width': '1.5'})
rcParams.update({'xtick.minor.pad': '7.0'})
rcParams.update({'xtick.minor.size': '3.5'})
rcParams.update({'xtick.minor.width': '1.0'})
rcParams.update({'ytick.major.pad': '7.0'})
rcParams.update({'ytick.major.size': '7.5'})
rcParams.update({'ytick.major.width': '1.5'})
rcParams.update({'ytick.minor.pad': '7.0'})
rcParams.update({'ytick.minor.size': '3.5'})
rcParams.update({'ytick.minor.width': '1.0'})
rcParams.update({'font.size': 30})
import dynesty
# defining constants
r = 2. # radius
w = 0.1 # width
c1 = np.array([-3.5, 0.]) # center of shell 1
c2 = np.array([3.5, 0.]) # center of shell 2
const = math.log(1. / math.sqrt(2. * math.pi * w**2)) # normalization constant
# log-likelihood of a single shell
def logcirc(theta, c):
    d = np.sqrt(np.sum((theta - c)**2, axis=-1))  # |theta - c|
    return const - (d - r)**2 / (2. * w**2)

# log-likelihood of two shells
def loglike(theta):
    return np.logaddexp(logcirc(theta, c1), logcirc(theta, c2))

# our prior transform
def prior_transform(x):
    return 12. * x - 6.
# compute likelihood surface over a 2-D grid
xx, yy = np.meshgrid(np.linspace(-6., 6., 200), np.linspace(-6., 6., 200))
L = np.exp(loglike(np.dstack((xx, yy))))
# plot result
fig = plt.figure(figsize=(6,5))
plt.scatter(xx, yy, c=L, s=0.5)
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.colorbar(label=r'$\mathcal{L}$');
# run with all defaults
sampler = dynesty.DynamicNestedSampler(loglike, prior_transform, ndim=2, rstate=rstate)
sampler.run_nested()
res = sampler.results
from dynesty import plotting as dyplot
dyplot.cornerplot(sampler.results, span=([-6, 6], [-6, 6]), fig=plt.subplots(2, 2, figsize=(10, 10)));
# bounding methods
bounds = ['none', 'single', 'multi', 'balls', 'cubes']
# run over each method and collect our results
bounds_res = []
for b in bounds:
sampler = dynesty.NestedSampler(loglike, prior_transform, ndim=2,
bound=b, sample='unif', nlive=500,
first_update={'min_ncall': 0.,
'min_eff': 100.}, rstate=rstate)
sys.stderr.flush()
sys.stderr.write('{}:\n'.format(b))
sys.stderr.flush()
t0 = time.time()
sampler.run_nested(dlogz=0.05)
t1 = time.time()
res = sampler.results
dtime = t1 - t0
sys.stderr.flush()
sys.stderr.write('\ntime: {0}s\n\n'.format(dtime))
bounds_res.append(sampler.results)
from dynesty import plotting as dyplot
# initialize figure
fig, axes = plt.subplots(1, 1, figsize=(6, 6))
# plot proposals in corner format for 'none'
fg, ax = dyplot.cornerbound(bounds_res[0], it=2000, prior_transform=prior_transform,
show_live=True, fig=(fig, axes))
ax[0, 0].set_title('No Bound', fontsize=26)
ax[0, 0].set_xlim([-6.5, 6.5])
ax[0, 0].set_ylim([-6.5, 6.5]);
# initialize figure
fig, axes = plt.subplots(1, 3, figsize=(18, 6))
axes = axes.reshape((1, 3))
[a.set_frame_on(False) for a in axes[:, 1]]
[a.set_xticks([]) for a in axes[:, 1]]
[a.set_yticks([]) for a in axes[:, 1]]
# plot proposals in corner format for 'single'
fg, ax = dyplot.cornerbound(bounds_res[1], it=2000, prior_transform=prior_transform,
show_live=True, fig=(fig, axes[:, 0]))
ax[0, 0].set_title('Single', fontsize=26)
ax[0, 0].set_xlim([-6.5, 6.5])
ax[0, 0].set_ylim([-6.5, 6.5])
# plot proposals in corner format for 'multi'
fg, ax = dyplot.cornerbound(bounds_res[2], it=2000, prior_transform=prior_transform,
show_live=True, fig=(fig, axes[:, 2]))
ax[0, 0].set_title('Multi', fontsize=26)
ax[0, 0].set_xlim([-6.5, 6.5])
ax[0, 0].set_ylim([-6.5, 6.5]);
# initialize figure
fig, axes = plt.subplots(1, 3, figsize=(18, 6))
axes = axes.reshape((1, 3))
[a.set_frame_on(False) for a in axes[:, 1]]
[a.set_xticks([]) for a in axes[:, 1]]
[a.set_yticks([]) for a in axes[:, 1]]
# plot proposals in corner format for 'balls'
fg, ax = dyplot.cornerbound(bounds_res[3], it=1500, prior_transform=prior_transform,
show_live=True, fig=(fig, axes[:, 0]))
ax[0, 0].set_title('Balls', fontsize=26)
ax[0, 0].set_xlim([-6.5, 6.5])
ax[0, 0].set_ylim([-6.5, 6.5])
# plot proposals in corner format for 'cubes'
fg, ax = dyplot.cornerbound(bounds_res[4], it=1500, prior_transform=prior_transform,
show_live=True, fig=(fig, axes[:, 2]))
ax[0, 0].set_title('Cubes', fontsize=26)
ax[0, 0].set_xlim([-6.5, 6.5])
ax[0, 0].set_ylim([-6.5, 6.5]);
# the proposals associated with our 'multi' bounds
bounds_res[2].bound
# compute effective 'single' volumes
single_logvols = [0.] # unit cube
for bound in bounds_res[1].bound[1:]:
logvol = bound.logvol # volume
funit = bound.unitcube_overlap(rstate=rstate) # fractional overlap with unit cube
single_logvols.append(logvol + np.log(funit))
single_logvols = np.array(single_logvols)
# compute effective 'multi' volumes
multi_logvols = [0.] # unit cube
for bound in bounds_res[2].bound[1:]: # skip unit cube
logvol, funit = bound.monte_carlo_logvol(rstate=rstate, return_overlap=True)
multi_logvols.append(logvol + np.log(funit)) # numerical estimate via Monte Carlo methods
multi_logvols = np.array(multi_logvols)
# plot results as a function of ln(volume)
plt.figure(figsize=(12,6))
plt.xlabel(r'$-\ln X_i$')
plt.ylabel(r'$\ln V_i$')
# 'single'
res = bounds_res[1]
x = -res.logvol # ln(prior volume)
it = res.bound_iter # proposal idx at given iteration
y = single_logvols[it] # corresponding ln(bounding volume)
plt.plot(x, y, lw=3, label='single')
# 'multi'
res = bounds_res[2]
x, it = -res.logvol, res.bound_iter
y = multi_logvols[it]
plt.plot(x, y, lw=3, label='multi')
plt.legend(loc='best', fontsize=24);
# bounding methods
sampling = ['unif', 'rwalk', 'slice', 'rslice', 'hslice']
# run over each method and collect our results
sampling_res = []
for s in sampling:
sampler = dynesty.NestedSampler(loglike, prior_transform, ndim=2,
bound='multi', sample=s, nlive=1000,
rstate=rstate)
sys.stderr.flush()
sys.stderr.write('{}:\n'.format(s))
sys.stderr.flush()
t0 = time.time()
sampler.run_nested(dlogz=0.05)
t1 = time.time()
res = sampler.results
dtime = t1 - t0
sys.stderr.flush()
sys.stderr.write('\ntime: {0}s\n\n'.format(dtime))
sampling_res.append(sampler.results)
# setup for running tests over gaussian shells in arbitrary dimensions
def run(ndim, bootstrap, bound, method, nlive):
"""Convenience function for running in any dimension."""
c1 = np.zeros(ndim)
c1[0] = -3.5
c2 = np.zeros(ndim)
c2[0] = 3.5
f = lambda theta: np.logaddexp(logcirc(theta, c1), logcirc(theta, c2))
sampler = dynesty.NestedSampler(f, prior_transform, ndim,
bound=bound, sample=method, nlive=nlive,
bootstrap=bootstrap,
first_update={'min_ncall': 0.,
'min_eff': 100.},
rstate=rstate)
sampler.run_nested(dlogz=0.5)
return sampler.results
# analytic ln(evidence) values
ndims = [2, 5, 10]
analytic_logz = {2: -1.75,
5: -5.67,
10: -14.59}
# results with bootstrapping
results = []
times = []
for ndim in ndims:
t0 = time.time()
sys.stderr.flush()
sys.stderr.write('{} dimensions:\n'.format(ndim))
sys.stderr.flush()
res = run(ndim, 20, 'multi', 'unif', 2000)
sys.stderr.flush()
curdt = time.time() - t0
times.append(curdt)
sys.stderr.write('\ntime: {0}s\n\n'.format(curdt))
results.append(res)
# results without bootstrapping
results2 = []
times2 = []
for ndim in ndims:
t0 = time.time()
sys.stderr.flush()
sys.stderr.write('{} dimensions:\n'.format(ndim))
sys.stderr.flush()
res = run(ndim, 0, 'multi', 'unif', 2000)
sys.stderr.flush()
curdt = time.time() - t0
times2.append(curdt)
sys.stderr.write('\ntime: {0}s\n\n'.format(curdt))
results2.append(res)
print('With bootstrapping:')
print("D analytic logz logzerr nlike eff(%) time")
for ndim, curt, res in zip(ndims, times, results):
print("{:2d} {:6.2f} {:6.2f} {:4.2f} {:6d} {:5.2f} {:6.2f}"
.format(ndim, analytic_logz[ndim], res.logz[-1], res.logzerr[-1],
sum(res.ncall), res.eff, curt))
print('\n')
print('Without bootstrapping:')
print("D analytic logz logzerr nlike eff(%) time")
for ndim, curt, res in zip(ndims, times2, results2):
print("{:2d} {:6.2f} {:6.2f} {:4.2f} {:6d} {:5.2f} {:6.2f}"
.format(ndim, analytic_logz[ndim], res.logz[-1], res.logzerr[-1],
sum(res.ncall), res.eff, curt))
# adding on slice sampling
results3 = []
times3 = []
for ndim in ndims:
t0 = time.time()
sys.stderr.flush()
sys.stderr.write('{} dimensions:\n'.format(ndim))
sys.stderr.flush()
res = run(ndim, 0, 'multi', 'rslice', 2000)
sys.stderr.flush()
curdt = time.time() - t0
times3.append(curdt)
sys.stderr.write('\ntime: {0}s\n\n'.format(curdt))
results3.append(res)
print('Random Slice sampling:')
print("D analytic logz logzerr nlike eff(%) time")
for ndim, curt, res in zip(ndims, times3, results3):
print("{:2d} {:6.2f} {:6.2f} {:4.2f} {:8d} {:5.2f} {:6.2f}"
.format(ndim, analytic_logz[ndim], res.logz[-1], res.logzerr[-1],
sum(res.ncall), res.eff, curt))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2-D Gaussian Shells
Step2: Default Run
Step3: Bounding Options
Step4: We can see that the amount of overhead associated with 'balls' and 'cubes' is non-trivial in this case. This mainly comes from sampling from our bounding distributions, since accepting or rejecting a point requires counting all neighbors within some radius $r$, leading to frequent nearest-neighbor searches.
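To make that concrete, here is a toy sketch (ours, not dynesty's actual implementation) of uniform sampling from a union of equal-radius balls: a candidate drawn from a randomly chosen ball must be accepted with probability 1/q, where q is the number of balls containing it, and computing q is exactly the neighbor count mentioned above.
import numpy as np

def sample_union_of_balls(centers, radius, rng):
    # Toy illustration only: draw one point uniformly from a union of equal-radius balls.
    n, ndim = centers.shape
    while True:
        c = centers[rng.integers(n)]  # pick one ball uniformly at random
        u = rng.normal(size=ndim)
        u *= rng.uniform() ** (1.0 / ndim) / np.linalg.norm(u)
        point = c + radius * u  # uniform draw inside the chosen ball
        # the nearest-neighbor step: count the balls that contain the candidate
        q = np.sum(np.linalg.norm(centers - point, axis=1) <= radius)
        if rng.uniform() < 1.0 / q:  # accept with probability 1/q
            return point

# e.g. sample_union_of_balls(np.array([[-3.5, 0.], [3.5, 0.]]), 2.1, np.random.default_rng(715))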
Step5: Now let's examine the single and multi-ellipsoidal cases.
Step6: Finally, let's take a look at our overlapping set of balls and cubes.
Step7: Bounding Objects
Step8: Each bounding object has a host of additional functionality that the user can experiment with. For instance, the volume contained by the union of ellipsoids within MultiEllipsoid can be estimated using Monte Carlo integration (these estimates are not computed by default). These volume estimates, combined with the fraction of our samples that overlaps with the unit cube (since our bounding distributions can exceed our prior bounds), give us an idea of how effectively our multi-ellipsoid bounds shrink over time compared with the single-ellipsoid case.
Step9: We see that in the beginning, only a single ellipsoid is used. After some bounding updates have been made, there is enough of an incentive to split the proposal into several ellipsoids. Although the initial ellipsoid decompositions can be somewhat unstable (i.e. bootstrapping can give relatively large volume expansion factors), over time this process leads to a significant decrease in effective overall volume.
Step11: As expected, uniform sampling in 2-D is substantially more efficient than other, more complex alternatives (especially 'hslice', which computes numerical gradients!). Regardless of runtime, however, we see that each method runs for a similar number of iterations and gives similar logz values (with comparable errors). They thus appear to be unbiased both with respect to each other and with respect to the analytic solution ($\ln\mathcal{Z} = -1.75$).
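To make the comparison with the analytic value explicit, a small sanity-check sketch over the results collected above (the sampling list, sampling_res results, and the logz/logzerr attributes are all as used elsewhere in this notebook):
analytic = -1.75
for s, res in zip(sampling, sampling_res):
    offset = res.logz[-1] - analytic
    print('{0}: logz = {1:.3f} +/- {2:.3f} (offset {3:+.3f})'.format(
        s, res.logz[-1], res.logzerr[-1], offset))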
Step12: While our results are comparable between the two cases, in higher dimensions multi-ellipsoid bounding distributions can sometimes be over-constrained, leading to biased results. Other sampling methods mitigate this problem by sampling conditioned on the ellipsoid axes, and so depend only on the ellipsoid shapes, not their sizes. 'rslice' is demonstrated below.
|
6,203
|
<ASSISTANT_TASK:>
Python Code:
import os
from google.cloud import bigquery
PROJECT = !gcloud config list --format 'value(core.project)'
PROJECT = PROJECT[0]
BUCKET = PROJECT
REGION = "us-central1"
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
%%bash
## Create a BigQuery dataset for babyweight if it doesn't exist
datasetexists=$(bq ls -d | grep -w ) # TODO: Add dataset name
if [ -n "$datasetexists" ]; then
echo -e "BigQuery dataset already exists, let's not recreate it."
else
echo "Creating BigQuery dataset titled: babyweight"
bq --location=US mk --dataset \
--description "Babyweight" \
$PROJECT:# TODO: Add dataset name
echo "Here are your current datasets:"
bq ls
fi
## Create GCS bucket if it doesn't exist already...
exists=$(gsutil ls -d | grep -w gs://${BUCKET}/)
if [ -n "$exists" ]; then
echo -e "Bucket exists, let's not recreate it."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${BUCKET}
echo "Here are your current buckets:"
gsutil ls
fi
%%bigquery
CREATE OR REPLACE TABLE
babyweight.babyweight_data AS
SELECT
# TODO: Add selected raw features and preprocessed features
FROM
publicdata.samples.natality
WHERE
# TODO: Add filters
%%bigquery
CREATE OR REPLACE TABLE
babyweight.babyweight_augmented_data AS
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
hashmonth
FROM
babyweight.babyweight_data
UNION ALL
SELECT
# TODO: Replace is_male and plurality as indicated above
FROM
babyweight.babyweight_data
%%bigquery
CREATE OR REPLACE TABLE
babyweight.babyweight_data_train AS
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
babyweight.babyweight_augmented_data
WHERE
# TODO: Modulo hashmonth to be approximately 75% of the data
%%bigquery
CREATE OR REPLACE TABLE
babyweight.babyweight_data_eval AS
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
babyweight.babyweight_augmented_data
WHERE
# TODO: Modulo hashmonth to be approximately 25% of the data
%%bigquery
-- LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT * FROM babyweight.babyweight_data_train
LIMIT 0
%%bigquery
-- LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT * FROM babyweight.babyweight_data_eval
LIMIT 0
# Construct a BigQuery client object.
client = bigquery.Client()
dataset_name = # TODO: Add dataset name
# Create dataset reference object
dataset_ref = client.dataset(
dataset_id=dataset_name, project=client.project)
# Export both train and eval tables
for step in [
# TODO: Loop over train and eval
]:
destination_uri = os.path.join(
"gs://", BUCKET, dataset_name, "data", "{}*.csv".format(step))
table_name = "babyweight_data_{}".format(step)
table_ref = dataset_ref.table(table_name)
extract_job = client.extract_table(
table_ref,
destination_uri,
# Location must match that of the source table.
location="US",
) # API request
extract_job.result() # Waits for job to complete.
print("Exported {}:{}.{} to {}".format(
client.project, dataset_name, table_name, destination_uri))
%%bash
gsutil ls gs://${BUCKET}/babyweight/data/*.csv
%%bash
gsutil cat gs://${BUCKET}/babyweight/data/train000000000000.csv | head -5
%%bash
gsutil cat gs://${BUCKET}/babyweight/data/eval000000000000.csv | head -5
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set environment variables so that we can use them throughout the entire lab. We will be using our project ID for our bucket.
Step2: The source dataset
Step3: Create the training and evaluation data tables
Step4: Lab Task #2
Step5: Lab Task #3
Step6: Split augmented dataset into eval dataset
Step7: Verify table creation
Step8: Lab Task #4
Step9: Verify CSV creation
|
6,204
|
<ASSISTANT_TASK:>
Python Code:
my_cat = 'cheshire'
catepillar_question = 'Who are you?'
_catepillar_question = 'Who are you?'
catepillar_Question1 = 'Who are you?'
1catepillar_question = 'Who are you?'
my_cat = 'cheshire'
my_cat = 'grinning'
my_cat = 'cheshire'
print(my_cat)
# see if you can display the value assigned to `my_motto` below
my_motto = "It's no use going back to yesterday" + ", " + "because I was a different person then."
title = 'Alice in Wonderland'
dir(title)
title.upper()
title.lower()
# use the `dir()` command below to identify the methods available on the variable below
song = '''
How doth the little crocodile
Improve his shining tail,
And pour the waters of the Nile
On every golden scale!
'''
# the code below will result in an error
main_character = Alice
# fix the above code here
opening = '''fAlice was beginning to get very tired of sitting by her sister on the bank,
and of having nothing to do. Once or twice she had peeped into the book her
sister was reading, but it had no pictures or conversations in it, ‘and what is
the use of a book,’ thought fAlice ‘without pictures or conversation?’'''
opening = opening.replace('fAlice', 'Alice')
print(opening)
facts = ['four times five is twelve', 'four times six is thirteen']
facts.append('and four times seven is–oh dear!')
print(facts)
# add a new fact to our list of "facts" here
# print the new list
print(facts[0])
# print 'and four times seven is–oh dear!' from the facts list
# the code below will turn the opening paragraph-currently stored as string data-into a list
opening_sentences = opening.split('.')
print(opening_sentences)
# using what you know about lists and index numbers, print the first sentence to Alice in Wonderland here
my_file = open('alice-in-wonderland.txt').read()
# try to load a file into memory here
# type your code here
lines = [
'How cheerfully he seems to grin',
'How neatly spread his claws',
'And welcome little fishes in',
'With gently smiling jaws!'
]
for line in lines:
    print(line)
lines = [
'Beautiful Soup, so rich and green',
'Waiting in a hot tureen!',
'Who for such dainties would not stoop?',
'Soup of the evening, beautiful Soup',
]
for line in lines:
    print(line.upper())
import collections
word_counter = collections.Counter()
dir(collections)
poem = '''
‘The Queen of Hearts, she made some tarts,
All on a summer day:
The Knave of Hearts, he stole those tarts,
And took them quite away!’
'''
# split the poem into a list of words
words = poem.split()
for word in words:
    word_counter[word] += 1
print(word_counter)
%matplotlib inline
import helpers
helpers.plot_counts(word_counter)
# type your code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here my_cat is the variable, and 'cheshire' is the value assigned to that variable. In general in Python, the thing on the left hand side of the equals sign is the variable, and the thing on the right hand side is that variable's value.
Step2: Variables can be reassigned in Python. We might assign a value to a variable, then later assign a different value to that variable
Step3: Here my_cat is given an initial value 'cheshire', but is then given a new value 'grinning'.
Step4: The print() command is a type of function. We can recognize print() as a function because it has parentheses after the name. In general, functions are little pieces of code that take arguments (the text enclosed in parentheses) as input and do something with those arguments. The print() function, for example, takes as its argument a variable like my_cat, then displays the value assigned to that variable. We will explore functions in greater depth below. For now, we just want to note that print() is a function, which we can recognize by the presence of parentheses after the function's name.
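For example, here is one built-in function and one function of our own; greet() is an illustrative helper, not something defined elsewhere in this lesson:
print(len('cheshire'))  # len() is a built-in function; 'cheshire' is its argument

def greet(name):  # a small function we define ourselves
    print('Hello, ' + name + '!')

greet('Alice')  # prints: Hello, Alice!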
Step5: Introspection
Step6: Running the dir() function returns a list of methods that are defined on the provided variable. In the example above, we can see that title has 50 or so methods defined. Methods are similar to functions, except they're tied to the variable (or object) you're working with. You can call a method using dot notation. Let's look at a few examples
Step7: We can see that the upper() method lets us uppercase our title, and the lower() method lets us lowercase our title. We will use more methods in just a moment. For now, we just want to remember that the dir() command tells us which methods can be called on variables.
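As a side note, the built-in help() function (not used elsewhere in this lesson) complements dir() by printing a method's documentation:
title = 'Alice in Wonderland'
print(dir(title)[:5])  # peek at the first few names dir() returns
help(title.upper)      # print the documentation for the upper() method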
Step8: Data Type
Step9: With string data, we often need to process it to find specific bits of interest, or to clean extraneous characters. Let's practice processing strings below.
Step10: The data in the string above looks good, except "Alice" is prefaced with an "f" for some reason. Let's clean that up with the replace() method, which replaces all instances of the first argument with the second argument
Step11: Data Type
Step12: Just like strings, lists have their own set of methods we can use. Let's practice the append() method, which adds an item to a list
Step13: See if you can add a new fact to our list of "facts" below
Step14: To access an item in the list, we use that item's index value. Keep in mind, Python starts counting at 0! So, if we want to print 'four times five is twelve' (which is the first item in our facts list), we would type
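That line, with a second index for contrast, looks like this:
print(facts[0])  # 'four times five is twelve' -- counting starts at 0
print(facts[1])  # 'four times six is thirteen' -- the second item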
Step15: Try to access 'and four times seven is–oh dear!' from facts
Step16: Now that we know a bit about lists, let's go back to the opening paragraph of Alice in Wonderland, which we've saved in the variable opening. Suppose we only want to examine the first sentence in the paragraph. To accomplish this goal, we could use the split() method, which cuts a string into a list of strings based on a user-provided argument.
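In miniature, with a made-up string, split() works like this:
quote = 'Curiouser and curiouser!'
print(quote.split(' '))    # cut on spaces: ['Curiouser', 'and', 'curiouser!']
print(quote.split('and'))  # cut on 'and': ['Curiouser ', ' curiouser!']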
Step17: Reading Files
Step18: You'll notice that the line above starts with open(). open() is an example of a built-in function, which means it comes predefined in all Python files. There are a small handful of these built-in functions, including the print() and dir() functions we saw above. For now we should note that open() returns a "file handler" that contains a read() method. Calling that read() method lets us read the text content of a file into memory.
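An equivalent spelling uses a with block, which closes the file automatically once the read finishes (assuming the same alice-in-wonderland.txt file is present):
with open('alice-in-wonderland.txt') as file_handler:
    my_file = file_handler.read()
print(my_file[:100])  # peek at the first 100 characters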
Step19: [section heading]
Step20: [image]
Step21: A for loop examines each item in a list from left to right (or top to bottom if you like). For loops are structured as follows:
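A sketch of that structure, with the moving parts labeled in comments:
# for <variable> in <list>:    <- the header names a loop variable and a list
#     <indented body>          <- the body runs once for each item
for word in ['Eat', 'Me']:
    print(word)  # `word` holds the current item on each pass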
Step22: Modules
Step23: The lines above show the general pattern we follow to use modules in Python. First we must import the module we wish to use. That import line makes it possible for us to use the variables and functions defined in the module in our own code.
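The same import-then-use pattern works for any module; math here is an illustrative pick from the standard library:
import math           # make the module's variables and functions available
print(math.sqrt(16))  # 4.0 -- call a function defined inside the module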
Step24: Now that we've imported the collections module and created a Counter, let's use that Counter to count some words
Step25: That's a little hard to read, but by using just one more module we can visualize our word counts
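Before plotting, note that Counter objects also offer a most_common() method (not shown in the lesson's code) that returns the tallies sorted from most to least frequent, which is often readable enough on its own:
import collections
counts = collections.Counter('the hatter and the hare and the dormouse'.split())
print(counts.most_common(2))  # [('the', 3), ('and', 2)]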
Step26: [section heading]
|
6,205
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-2', 'sandbox-2', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
6,206
|
<ASSISTANT_TASK:>
Python Code:
ENDPOINT = "<YOUR_ENDPOINT>"
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
%%writefile kfp-cli/Dockerfile
# TODO
IMAGE_NAME = "kfp-cli"
TAG = "latest"
IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}"
!gcloud builds # COMPLETE THE COMMAND
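# (One possible completion, left hedged because this is an exercise blank:
#  !gcloud builds submit --timeout 15m --tag {IMAGE_URI} kfp-cli)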
%%writefile cloudbuild.yaml
steps:
# Build the trainer image
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/$_TRAINER_IMAGE_NAME:$TAG_NAME', '.']
dir: $_PIPELINE_FOLDER/trainer_image
# TODO: Build the base image for lightweight components
- name: # TODO
args: # TODO
dir: # TODO
# Compile the pipeline
# TODO: Set the environment variables below for the $_PIPELINE_DSL script
# HINT: https://cloud.google.com/cloud-build/docs/configuring-builds/substitute-variable-values
- name: 'gcr.io/$PROJECT_ID/kfp-cli'
args:
- '-c'
- |
dsl-compile --py $_PIPELINE_DSL --output $_PIPELINE_PACKAGE
env:
- 'BASE_IMAGE=' # TODO
- 'TRAINER_IMAGE=' # TODO
- 'RUNTIME_VERSION=' # TODO
- 'PYTHON_VERSION=' # TODO
- 'COMPONENT_URL_SEARCH_PREFIX=' # TODO
- 'USE_KFP_SA=$_USE_KFP_SA'
dir: $_PIPELINE_FOLDER/pipeline
# Upload the pipeline
# TODO: Use the kfp-cli Cloud Builder and write the command to upload the ktf pipeline
- name: # TODO
args:
- '-c'
- |
# TODO
dir: $_PIPELINE_FOLDER/pipeline
# Push the images to Container Registry
# TODO: List the images to be pushed to the project Docker registry
images: # TODO
SUBSTITUTIONS = """
_ENDPOINT={},\
_TRAINER_IMAGE_NAME=trainer_image,\
_BASE_IMAGE_NAME=base_image,\
TAG_NAME=test,\
_PIPELINE_FOLDER=.,\
_PIPELINE_DSL=covertype_training_pipeline.py,\
_PIPELINE_PACKAGE=covertype_training_pipeline.yaml,\
_PIPELINE_NAME=covertype_continuous_training,\
_RUNTIME_VERSION=1.15,\
_PYTHON_VERSION=3.7,\
_USE_KFP_SA=True,\
_COMPONENT_URL_SEARCH_PREFIX=https://raw.githubusercontent.com/kubeflow/pipelines/0.2.5/components/gcp/
""".format(
    ENDPOINT
).strip()
!gcloud builds submit . --config cloudbuild.yaml --substitutions {SUBSTITUTIONS}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating the KFP CLI builder
Step2: Build the image and push it to your project's Container Registry.
Step3: Exercise
Step4: Understanding the Cloud Build workflow.
Step6: Manually triggering CI/CD runs
|
6,207
|
<ASSISTANT_TASK:>
Python Code:
from cobra import Model, Reaction, Metabolite
# Best practise: SBML compliant IDs
cobra_model = Model('example_cobra_model')
reaction = Reaction('3OAS140')
reaction.name = '3 oxoacyl acyl carrier protein synthase n C140 '
reaction.subsystem = 'Cell Envelope Biosynthesis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.objective_coefficient = 0. # this is the default
ACP_c = Metabolite(
'ACP_c',
formula='C11H21N2O7PRS',
name='acyl-carrier-protein',
compartment='c')
omrsACP_c = Metabolite(
'3omrsACP_c',
formula='C25H45N2O9PRS',
name='3-Oxotetradecanoyl-acyl-carrier-protein',
compartment='c')
co2_c = Metabolite(
'co2_c',
formula='CO2',
name='CO2',
compartment='c')
malACP_c = Metabolite(
'malACP_c',
formula='C14H22N2O10PRS',
name='Malonyl-acyl-carrier-protein',
compartment='c')
h_c = Metabolite(
'h_c',
formula='H',
name='H',
compartment='c')
ddcaACP_c = Metabolite(
'ddcaACP_c',
formula='C23H43N2O8PRS',
name='Dodecanoyl-ACP-n-C120ACP',
compartment='c')
reaction.add_metabolites({malACP_c: -1.0,
h_c: -1.0,
ddcaACP_c: -1.0,
co2_c: 1.0,
ACP_c: 1.0,
omrsACP_c: 1.0})
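# (Aside: metabolites can also be added one at a time; a further call such as
# reaction.add_metabolites({co2_c: 1.0}) merges coefficients into the
# existing stoichiometry by default rather than replacing it.)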
reaction.reaction # This gives a string representation of the reaction
reaction.gene_reaction_rule = '( STM2378 or STM1197 )'
reaction.genes
print('%i reactions initially' % len(cobra_model.reactions))
print('%i metabolites initially' % len(cobra_model.metabolites))
print('%i genes initially' % len(cobra_model.genes))
cobra_model.add_reaction(reaction)
# Now there are things in the model
print('%i reaction' % len(cobra_model.reactions))
print('%i metabolites' % len(cobra_model.metabolites))
print('%i genes' % len(cobra_model.genes))
# Iterate through the objects in the model
print("Reactions")
print("---------")
for x in cobra_model.reactions:
print("%s : %s" % (x.id, x.reaction))
print("")
print("Metabolites")
print("-----------")
for x in cobra_model.metabolites:
print('%9s : %s' % (x.id, x.formula))
print("")
print("Genes")
print("-----")
for x in cobra_model.genes:
associated_ids = (i.id for i in x.reactions)
print("%s is associated with reactions: %s" %
(x.id, "{" + ", ".join(associated_ids) + "}"))
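# As the description notes, objects already in a model can also be retrieved
# by identifier, e.g.:
# cobra_model.reactions.get_by_id('3OAS140')
# cobra_model.metabolites.get_by_id('ACP_c')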
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We need to create metabolites as well. If we were using an existing model, we could use get_by_id to get the apporpriate Metabolite objects instead.
Step2: Adding metabolites to a reaction requires using a dictionary of the metabolites and their stoichiometric coefficients. A group of metabolites can be added all at once, or they can be added one at a time.
Step3: The gene_reaction_rule is a boolean representation of the gene requirements for this reaction to be active as described in Schellenberger et al 2011 Nature Protocols 6(9)
Step4: At this point in time, the model is still empty
Step5: We will add the reaction to the model, which will also add all associated metabolites and genes
Step6: We can iterate through the model objects to observe the contents
|
6,208
|
<ASSISTANT_TASK:>
Python Code:
from notebook_preamble import J, V, define
define('BTree-iter == [not] [pop] roll< [dupdip rest rest] cons [step] genrec')
J('[] [23] BTree-iter') # It doesn't matter what F is as it won't be used.
J('["tommy" 23 [] []] [first] BTree-iter')
J('["tommy" 23 ["richard" 48 [] []] ["jenny" 18 [] []]] [first] BTree-iter')
J('["tommy" 23 ["richard" 48 [] []] ["jenny" 18 [] []]] [second] BTree-iter')
define('BTree-new == swap [[] []] cons cons')
V('"v" "k" BTree-new')
define('P == pop roll> pop first')
V('["k" "v" [] []] "vv" "kk" [0] P >')
define('K == cons cons dipdd')
define('T == [K] cons cons cons infra')
V('"r" "l" "v" "k" "vv" "kk" [0] K')
V('["k" "v" "l" "r"] "vv" "kk" [0] T')
define('Te == [cons cons dipd] cons cons cons infra')
V('["k" "v" "l" "r"] "vv" "kk" [0] Te')
define('Ee == pop swap roll< rest rest cons cons')
V('["k" "v" "l" "r"] "vv" "k" [0] Ee')
define('E == [P <] [Te] [Ee] ifte')
define('BTree-add == [popop not] [[pop] dipd BTree-new] [] [[P >] [T] [E] ifte] genrec')
J('[] 23 "b" BTree-add') # Initial
J('["b" 23 [] []] 88 "c" BTree-add') # Less than
J('["b" 23 [] []] 88 "a" BTree-add') # Greater than
J('["b" 23 [] []] 88 "b" BTree-add') # Equal to
J('[] 23 "a" BTree-add 88 "b" BTree-add 44 "c" BTree-add') # Series.
J('[] [3 9 5 2 8 6 7 8 4] [0 swap BTree-add] step')
define('to_set == [] swap [0 swap BTree-add] step')
J('[3 9 5 2 8 6 7 8 4] to_set')
define('unique == [to_set [first] BTree-iter] cons run')
J('[3 9 3 5 2 9 8 8 8 6 2 7 8 4 3] unique') # Filter duplicate items.
from joy.library import FunctionWrapper
from joy.utils.stack import pushback
from notebook_preamble import D
@FunctionWrapper
def cmp_(stack, expression, dictionary):
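    # The Joy stack is a nested-tuple cons list: unpacking five levels pops
    # the quotations [L] [E] [G] and the operands b and a; pushback then
    # prepends the selected quotation onto the pending expression.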
L, (E, (G, (b, (a, stack)))) = stack
expression = pushback(G if a > b else L if a < b else E, expression)
return stack, expression, dictionary
D['cmp'] = cmp_
J("1 0 ['G'] ['E'] ['L'] cmp")
J("1 1 ['G'] ['E'] ['L'] cmp")
J("0 1 ['G'] ['E'] ['L'] cmp")
from joy.library import DefinitionWrapper
DefinitionWrapper.add_definitions('''
P == over [popop popop first] nullary
T> == [cons cons dipdd] cons cons cons infra
T< == [cons cons dipd] cons cons cons infra
E == pop swap roll< rest rest cons cons
BTree-add == [popop not] [[pop] dipd BTree-new] [] [P [T>] [E] [T<] cmp] genrec
''', D)
J('[] 23 "b" BTree-add') # Initial
J('["b" 23 [] []] 88 "c" BTree-add') # Less than
J('["b" 23 [] []] 88 "a" BTree-add') # Greater than
J('["b" 23 [] []] 88 "b" BTree-add') # Equal to
J('[] 23 "a" BTree-add 88 "b" BTree-add 44 "c" BTree-add') # Series.
define('BTree-iter-order == [not] [pop] [dup third] [[cons dip] dupdip [[first] dupdip] dip [rest rest rest first] dip i] genrec')
J('[3 9 5 2 8 6 7 8 4] to_set BTree-iter-order')
# I don't want to deal with name conflicts with the above so I'm inlining everything here.
# The original Joy system has "hide" which is a meta-command which allows you to use named
# definitions that are only in scope for a given definition. I don't want to implement
# that (yet) so...
define('''
BTree-get == [pop not] swap [] [
over [pop popop first] nullary
[[rest rest rest first] dipd i]
[popop second]
[[third] dipd i]
cmp
] genrec
''')
J('[] "gary" [popop "err"] BTree-get')
J('["gary" 23 [] []] "gary" [popop "err"] BTree-get')
J('''
[] [[0 'a'] [1 'b'] [2 'c']] [i BTree-add] step
'c' [popop 'not found'] BTree-get
''')
DefinitionWrapper.add_definitions('''
TS0 == [not] swap unit [pop] swoncat
TS1 == [dip] cons [i] swoncat
treestep == swap [map] swoncat [TS1 [TS0] dip] dip genrec
''', D)
J('[] 0 [sum +] [] treestep')
J('[23 []] 0 [sum +] [] treestep')
J('[23 [[2 []] [3 []]]] 0 [sum +] [] treestep')
define('TS1 == [dip] cons [uncons] swoncat') # We only need to redefine one word.
J('[23 [2] [3]] 0 [sum +] [] treestep')
J('[23 [2 [8] [9]] [3] [4 []]] 0 [sum +] [] treestep')
J('[[3 0] [[2 0] [] []] [[9 0] [[5 0] [[4 0] [] []] [[8 0] [[6 0] [] [[7 0] [] []]] []]] []]] 23 [i] [uncons pop] treestep')
J('[[3 0] [[2 0] [] []] [[9 0] [[5 0] [[4 0] [] []] [[8 0] [[6 0] [] [[7 0] [] []]] []]] []]] [] [flatten cons] [first] treestep')
J('[[3 0] [[2 0] [] []] [[9 0] [[5 0] [[4 0] [] []] [[8 0] [[6 0] [] [[7 0] [] []]] []]] []]] [] [i roll< swons concat] [uncons pop] treestep')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Adding Nodes to the BTree
Step2: (As an implementation detail, the [[] []] literal used in the definition of BTree-new will be reused to supply the constant tail for all new nodes produced by it. This is one of those cases where you get amortized storage "for free" by using persistent datastructures. Because the tail, which is ((), ((), ())) in Python, is immutable and embedded in the definition body for BTree-new, all new nodes can reuse it as their own tail without fear that some other code somewhere will change it.)
Step3: If the key we're adding is greater than the node's key.
Step4: If the key we're adding is less than the node's key.
Step5: Else the keys must be equal.
Step6: Now we can define BTree-add
Step7: We can use this to make a set-like datastructure by just setting values to e.g. 0 and ignoring them. It's set-like in that duplicate items added to it will only occur once within it, and we can query it in $O(\log_2 N)$ time.
Step8: And with that we can write a little program to remove duplicate items from a list.
Step9: cmp combinator
Step10: Factoring and naming
Step11: Getting values by key
Step12: TODO
Step13: [] 0 [C] [N] treestep
Step14: A slight modification.
Step15: I think these trees seem a little easier to read.
Step16: Doesn't work because map extracts the first item of whatever its mapped function produces. We have to return a list, rather than depositing our results directly on the stack.
Step17: There we go.
|
6,209
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
# Create a Series showing monthly production
produccion = pd.Series( [120,130,110,150,170,180,170,160,190,175,160,141],
index=['ene','feb','mar','abr','may','jun','jul','ago','sep','oct','nov','dec'] )
print(produccion)
# Create a Series from a dictionary
dict = { 'a':6.4, 'b':7.1, 'c':8.8, 'd':9.2, 'e':6.4, 'f':5.6, 'g':3.2 }
pesos = pd.Series(dict)
print(pesos)
# Create a Series using a normally distributed random sequence
# If no indices are given, they are numbered automatically when the Series is created
import numpy as np
aleatorio = pd.Series( np.random.randn(10) )
print(aleatorio)
# Number of elements
pesos.size
# Shape (number of rows, columns). Series only have rows
pesos.shape
# Data type held by the Series
pesos.dtype
# The indices of the Series
pesos.index
# The values contained in the Series
pesos.values
## Accessing the elements of the Series
pesos['a']
# or using the .get method
pesos.get('a')
pesos[0]
pesos['b':'e']
pesos[1:5]
pesos[:'c']
pesos[:3]
pesos['d':]
pesos[3:]
pesos[ ['b','e','c'] ]
pesos[ [1,4,2] ]
pesos[ pesos>7 ]
'a' in pesos
'h' in pesos
'Sep' in produccion
# Assign a value to an index
pesos.at['a'] = 5.0
pesos[0]
# Is the Series empty?
pesos.empty
# Create an empty Series
volumenes = pd.Series()
volumenes.empty
pesos.drop(['e','c'])
# Make a copy of the Series
pesos2 = pesos.copy()
# Set some of its elements to null
pesos2.at['b'] = None
pesos2
pesos2.dropna()
# Check whether all/any elements of the Series satisfy a logical condition
(pesos>5).all()
(pesos<5).any()
# Index corresponding to the largest/smallest value of the Series
pesos.argmax()
pesos.argmin()
# Convert to a numpy array
x = pesos.as_matrix()
print(x)
print(type(x))
# Descriptive statistics of the Series
pesos.describe()
# Select a range of elements at the beginning/end of the Series
print( pesos.head(3) )
print( pesos.tail(2) )
# Create a new Series
medidas = pd.Series({ 'b':6.1, 'c':8.2, 'd':7.3, 'e':5.4, 'f':6.5, 'h':3.2 })
print("pesos\n", pesos)
print("\nmedidas\n",medidas)
pesos.add(medidas)
pesos+medidas
pesos-medidas
pesos*medidas
pesos/medidas
medidas.at['a']=1.
pesos.size, medidas.size
pesos<medidas
pesos.eq(medidas)
pesos!=medidas
pesos.ge(medidas)
# Check whether any element of the Series is null
pesos.isnull()
pesos.count(), pesos.sum(), pesos.mean(), pesos.median()
pesos.sort_values()
pesos.sort_index(ascending=False)
# Save the Series to a file
pesos.to_csv('pesos.csv')
# Read the Series from a file
copia = pd.read_csv('pesos.csv', squeeze=True, index_col=0, header=None)
copia
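# squeeze=True makes read_csv return a Series rather than a one-column
# DataFrame; index_col=0 restores the saved index (older pandas API).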
# Plot the Series
import matplotlib.pyplot as plt
pesos.plot()
plt.show()
# Plot its histogram
pesos.hist()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pandas is an object-oriented library, and the most important objects it provides are Series and DataFrames.
Step2: Attributes and methods of Series
Step3: The elements of a Series are accessed much like those of a list. You can use the Series indices or the positions of the elements, for example
Step4: You can also select a particular subset of indices or positions
Step5: Logical indexing
Step6: You can check whether a given index is present in the Series
Step7: Removing elements from a Series
Step8: Of particular interest are cases where the Series contains null elements (NaN). All null elements can be removed with the .dropna method
Step9: Methods for checking a logical condition over all elements of a Series
Step10: Arithmetic operators
Step11: Relational operators
Step12: The common statistical operations are also available
Step13: The Series can be sorted by values or by index, in ascending or descending order
Step14: Input/output operations
|
6,210
|
<ASSISTANT_TASK:>
Python Code:
from biofloat import ArgoData
ad = ArgoData()
wmo_list = ad.get_oxy_floats_from_status()
sdf, _ = ad._get_df(ad._STATUS)
sdf.ix[:, 'WMO':'GREYLIST'].head()
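# Note: .ix is deprecated in modern pandas; sdf.loc[:, 'WMO':'GREYLIST']
# is the current equivalent of this label-based slice.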
%pylab inline
def dist_plot(df, title):
from datetime import date
ax = df.hist(bins=100)
ax.set_xlabel('AGE (days)')
ax.set_ylabel('Count')
ax.set_title('{} as of {}'.format(title, date.today()))
dist_plot(sdf['AGE'], 'Argo float AGE distribution')
sdfq = sdf.query('(AGE != 0) & (OXYGEN == 1) & (GREYLIST != 1)')
dist_plot(sdfq['AGE'], title='Argo oxygen float AGE distribution')
print('Count age_gte 0340:', len(sdfq.query('AGE >= 340')))
print('Count age_gte 1000:', len(sdfq.query('AGE >= 1000')))
print('Count age_gte 2000:', len(sdfq.query('AGE >= 2000')))
print('Count age_gte 2200:', len(sdfq.query('AGE >= 2200')))
print('Count age_gte 3000:', len(sdfq.query('AGE >= 3000')))
len(ad.get_oxy_floats_from_status(age_gte=2200))
%%time
from os.path import expanduser, join
ad.set_verbosity(2)
ad = ArgoData(cache_file = join(expanduser('~'),
'biofloat_fixed_cache_age2200_profiles2_variablesDOXY_ADJUSTED-PSAL_ADJUSTED-TEMP_ADJUSTED.hdf'))
wmo_list = ad.get_oxy_floats_from_status(2200)
# Use 'update_cache=False' to avoid doing lookups for new profile data
df = ad.get_float_dataframe(wmo_list, max_profiles=2, update_cache=False)
# Parameter long_name and units copied from attributes in NetCDF files
time_range = '{} to {}'.format(df.index.get_level_values('time').min(),
df.index.get_level_values('time').max())
parms = {'TEMP_ADJUSTED': 'SEA TEMPERATURE IN SITU ITS-90 SCALE (degree_Celsius)',
'PSAL_ADJUSTED': 'PRACTICAL SALINITY (psu)',
'DOXY_ADJUSTED': 'DISSOLVED OXYGEN (micromole/kg)'}
plt.rcParams['figure.figsize'] = (18.0, 8.0)
fig, ax = plt.subplots(1, len(parms), sharey=True)
ax[0].invert_yaxis()
ax[0].set_ylabel('SEA PRESSURE (decibar)')
for i, (p, label) in enumerate(parms.items()):
ax[i].set_xlabel(label)
ax[i].plot(df[p], df.index.get_level_values('pressure'), '.')
plt.suptitle('Float(s) ' + ' '.join(wmo_list) + ' from ' + time_range)
import pylab as plt
from mpl_toolkits.basemap import Basemap
plt.rcParams['figure.figsize'] = (18.0, 8.0)
m = Basemap(llcrnrlon=15, llcrnrlat=-90, urcrnrlon=390, urcrnrlat=90, projection='cyl')
m.fillcontinents(color='0.8')
m.scatter(df.index.get_level_values('lon'), df.index.get_level_values('lat'), latlon=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the default list of floats that have oxygen data.
Step2: We can explore the distribution of AGEs of the Argo floats by getting the status data in a DataFrame (sdf).
Step3: Define a function (dist_plot) and plot the distribution of the AGE column.
Step4: There are over 600 floats with an AGE of 0. The .get_oxy_floats_from_status() method does not select these floats as I believe they are 'inactive'. Let's count the number of non-greylisted oxygen floats at various AGEs so that we can build a reasonably sized test cache.
Step5: Compare the 2200 count with what .get_oxy_floats_from_status(age_gte=2200) returns.
Step6: That's reassuring! Now, let's build a custom cache file of 2 profiles from the 19 floats that have an AGE >= 2200 days.
Step7: Plot the profiles.
Step8: Plot the profiles on a map.
|
6,211
|
<ASSISTANT_TASK:>
Python Code:
#importing all required modules
#important otherwise pop-up window may not work
%matplotlib inline
import numpy as np
import scipy as sp
from scipy.integrate import odeint, ode, romb, cumtrapz
import matplotlib as mpl
import matplotlib.pyplot as plt
from math import *
import seaborn
from IPython.display import Image
#bokeh
from bokeh.plotting import figure, output_file, output_notebook, show
# RMS value of voltage
u = 230
#time vector
t = np.linspace(0,0.4, 1000)
#frequency & angular frequency
f = 50
omega = 2 * pi * f
#Resistance (values to consider: 5 and 10 Ohms)
R = 5
#Inductance
L = 0.1
XL = 2*pi*f*L
#Capacitance (values worth trying: 0.01 - overdamped, two-inertia response, or 0.001 - oscillatory)
C = 0.001
XC = 1/(omega*C)
#Phase angle
phi=atan((XL-XC)/R)
#closing angle [rad]
alpha = 0
XL, XC
ua = [u for k in t]
#definition of the function dp/dt
def di(y,t):
#x = i, p = di/dt
x, p = y[0], y[1]
dx = p
dp = 1/L*(-R*p-(1/C)*x)
return [dx, dp]
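# The system above encodes the series R-L-C equation differentiated once:
#   L*d2i/dt2 + R*di/dt + i/C = du/dt,
# with du/dt = 0 for the dc supply and state vector y = [i, di/dt].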
#initial state
#initial capacitor voltage
uc0 = 0
y0 = [0.0, 1/L*(u-uc0)]
y0
I = odeint(di, y0, t)
ia = I[:,0]
# Capacitor voltage derivative: duc/dt = i/C
duc = ia/C
uc = cumtrapz(duc, dx=0.4/1000, initial=0)
# cumtrapz with initial=0 returns an array the same length as t, so no extra
# element needs to be appended (np.append would not modify uc in place anyway)
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8,8))
ax[0].plot(t,ia, label="Current")
ax[0].set_ylabel("Current [A]")
ax[0].set_xlabel("Time [s]")
ax[0].set_title("Current in R-L-C circuit during switch-on")
ax[0].legend()
ax[1].plot(t,ua, label="Supply voltage", color="green")
ax[1].plot(t,uc, label="Capacitor voltage", color="orange")
ax[1].set_ylabel("Voltage [V]")
ax[1].set_xlabel("Time [s]")
ax[1].set_title("Supply voltage")
ax[1].legend()
fig.tight_layout()
#checking damping factor: if below 1 - underdamped, if above 1 - overdamped
damp = (R/2)*sqrt(C/L)
damp
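# With R=5, L=0.1, C=0.001: damp = (5/2)*sqrt(0.001/0.1) = 0.25 < 1,
# so the switch-on current is an underdamped, decaying oscillation.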
ub = [sqrt(2)*u*sin(omega*k + alpha) for k in t]
# definition of the function dp/dt
def di(y,t):
#x = i, p = di/dt
x, p = y[0], y[1]
dx = p
dp = 1/L*(omega*sqrt(2)*u*cos(omega*t + alpha)-R*p-(1/C)*x)
return [dx, dp]
#initial state
#initial capacitor voltage
uc0 = 0
y0 = [0.0, 1/L*(ub[0]-uc0)]  # initial di/dt is set by the sinusoidal supply ub, not the dc vector ua
I = odeint(di, y0, t)
ib = I[:,0]
#Capacitor voltage derivative
duc2 = ib/C
uc2 = cumtrapz(duc2, dx=0.4/1000, initial=0)
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8,8))
ax[0].plot(t,ib, label="Current")
ax[0].set_ylabel("Current [A]")
ax[0].set_xlabel("Time [s]")
ax[0].set_title("Current in R-L-C circuit during switch-on")
ax[0].legend()
ax[1].plot(t,ub, label="Line voltage", color="green")
ax[1].plot(t,uc2, label="Capacitor voltage", color="orange")
ax[1].set_ylabel("Voltage [V]")
ax[1].set_xlabel("Time [s]")
ax[1].set_title("Supply voltage")
ax[1].legend()
fig.tight_layout()
#checking the amplitude value in steady state
Im = sqrt(2)*u/(sqrt(R**2+(XL-XC)**2))
Im
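# With these parameters XL ~ 31.4 Ohm and XC ~ 3.2 Ohm, so
# Im ~ sqrt(2)*230/sqrt(5**2 + 28.2**2) ~ 11.3 A, which the simulated
# current should approach in steady state.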
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: RLC circuit is governed by the following formulas
Step2: RLC circuit fed with dc voltage
Step3: RLC Circuit with sinusoidal voltage
|
6,212
|
<ASSISTANT_TASK:>
Python Code:
import os
os.listdir('partonopeus')
inputFiles = {}
for inputFile in os.listdir('partonopeus'):
siglum = inputFile[0]
contents = open('partonopeus/' + inputFile,'rb').read()
inputFiles[siglum] = contents
from lxml import etree
print(etree.tostring(etree.XML(inputFiles['A'])))
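# (Hedged sketch: to query the parsed tree instead of stringifying it, keep the
# etree object and use XPath. The tag name below is an assumption for
# illustration only; the real element names depend on the files' markup.)
# tree = etree.XML(inputFiles['A'])
# lines = tree.xpath('//l')  # hypothetical line elements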
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We create a dictionary to hold our input files, using the single-letter filename before the '.xml' extension as the key and the file itself as the value. The lxml library that we use to parse XML requires that we open the XML file for reading in bytes mode.
Step2: We load the lxml library and use the .XML() method to parse the file and the .tostring() method to stringify the results so that we can examine them. In Real Life, if we need to manipulate the XML (e.g., to search it with XPath), we would keep it as XML. That is, the tostring() process is used here just to create something human-readable for pedagogical purposes.
|
6,213
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cas', 'sandbox-1', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
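# Example (hypothetical value, for illustration only):
# DOC.set_value("OASIS3-MCT")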
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
6,214
|
<ASSISTANT_TASK:>
Python Code:
from landlab.components import SpeciesEvolver, Profiler
from landlab.components.species_evolution import ZoneController
from landlab.io import read_esri_ascii
from landlab.plot import imshow_grid
import matplotlib.pyplot as plt
import numpy as np
# Create a model grid and set a topographic elevation field.
(mg, z) = read_esri_ascii('model_grid_steady_state_elevation.txt')
mg.at_node['topographic__elevation'] = z
# Set the top and bottom grid boundaries to open.
mg.set_closed_boundaries_at_grid_edges(right_is_closed=True,
top_is_closed=False,
left_is_closed=True,
bottom_is_closed=False)
# Plot the elevation field.
imshow_grid(mg, 'topographic__elevation', colorbar_label='elevation (m)', shrink=0.4)
# Set a rate of temperature decrease per meter of elevation increase.
lapse_rate = -0.005
# Create a surface air temperature field.
temperature_at_base_level = 26
T = lapse_rate * z + temperature_at_base_level
mg.add_field('node', 'land_surface_air__temperature', T)
# Plot temperature field.
imshow_grid(
mg, 'land_surface_air__temperature',
colorbar_label='surface air\ntemperature (C$\degree$)',
shrink=0.4,
cmap='bwr'
)
se = SpeciesEvolver(mg)
se.record_data_frame
def zone_func(grid):
    """Get a mask of the total zone extent."""
T = grid.at_node['land_surface_air__temperature']
T_max = 25
T_min = 18
zone_extent_mask = np.all([T >= T_min, T <= T_max], 0)
return zone_extent_mask
imshow_grid(mg, zone_func(mg), allow_colorbar=False, plot_name='zone extent in white')
zc = ZoneController(mg, zone_func, minimum_area=50000)
# Create a map of all zones.
zone_map = np.zeros(mg.number_of_nodes)
for i, zone in enumerate(zc.zones):
zone_map[zone.mask] = i + 1
# Plot zone map.
cmap = plt.cm.tab20
cmap.set_under('w')
title = 'zone count = {}'.format(len(zc.zones))
imshow_grid(mg, zone_map, cmap=cmap, allow_colorbar=False, plot_name=title, vmin=1)
# Plot profile in map view.
endpoints = [29874, 174]
profiler = Profiler(mg, endpoints)
profiler.run_one_step()
profiler.plot_profiles_in_map_view(
color='k', field=zone_map, cmap=cmap,
allow_colorbar=False, plot_name='Profile (trace in black)', vmin=1
)
# Plot profile.
fig, ax0 = plt.subplots()
profiler.plot_profiles(
ylabel='Topographic Elevation (m)',
color='k',
title='Profile')
ax0.set_ylim([0, 3000])
ax1 = ax0.twinx()
ax1.set_ylim(ax0.get_ylim())
ax1.set_yticklabels(lapse_rate * ax0.get_yticks() + temperature_at_base_level)
ax1.set_ylabel('Surface Air Temperature (\N{DEGREE SIGN}C)')
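# The twin axis reuses the elevation tick positions and relabels them as
# temperature via the lapse-rate relation T = lapse_rate * z + T_base.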
# Include species temperature bounds.
upper = (18 - temperature_at_base_level) / lapse_rate
ax0.plot([0, 5000], [upper, upper], 'c--', label='zone minimum temperature')
lower = (25 - temperature_at_base_level) / lapse_rate
ax0.plot([0, 5000], [lower, lower], 'r--', label='zone maximum temperature')
ax0.legend();
taxa = zc.populate_zones_uniformly(1)
se.track_taxa(taxa)
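# populate_zones_uniformly(1) created one ZoneTaxon per zone; track_taxa
# registers those objects so SpeciesEvolver records them at each time step.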
se.record_data_frame
se.taxa_data_frame
# Update the temperature field.
temperature_at_base_level += 5
mg.at_node['land_surface_air__temperature'] = lapse_rate * z + temperature_at_base_level
# Plot the temperature field.
imshow_grid(
mg, 'land_surface_air__temperature',
colorbar_label='surface air\ntemperature (C$\degree$)',
shrink=0.4, cmap='bwr'
)
dt = 1
zc.run_one_step(dt)
se.run_one_step(dt)
# Create a map of all zones.
zone_map = np.zeros(mg.number_of_nodes)
for i, zone in enumerate(zc.zones):
zone_map[zone.mask] = i + 1
# Plot zone map.
title = 'zone count = {} (zone is shaded)'.format(len(zc.zones))
imshow_grid(mg, zone_map, cmap=cmap, allow_colorbar=False, plot_name=title, vmin=0.5)
se.taxa_data_frame
# Plot profile in map view.
endpoints = [174, 29874]
profiler = Profiler(mg, endpoints)
profiler.run_one_step()
profiler.plot_profiles_in_map_view(
color='k', field=zone_map, cmap=cmap,
allow_colorbar=False, plot_name='Profile (trace in black)', vmin=0.5
)
# Plot profile.
fig, ax0 = plt.subplots()
profiler.plot_profiles(
ylabel='Topographic Elevation (m)',
color='k',
title='Profile')
ax0.set_ylim([0, 3000])
ax1 = ax0.twinx()
ax1.set_ylim(ax0.get_ylim())
ax1.set_yticklabels(lapse_rate * ax0.get_yticks() + temperature_at_base_level)
ax1.set_ylabel('Surface Air Temperature (\N{DEGREE SIGN}C)')
# Include species temperature bounds.
upper = (18 - temperature_at_base_level) / lapse_rate
ax0.plot([0, 5000], [upper, upper], 'c--', label='zone minimum temperature')
lower = (25 - temperature_at_base_level) / lapse_rate
ax0.plot([0, 5000], [lower, lower], 'r--', label='zone maximum temperature')
ax0.legend();
# Update the surface temperature field.
temperature_at_base_level += 5
mg.at_node['land_surface_air__temperature'] = lapse_rate * z + temperature_at_base_level
# Plot temperature field.
imshow_grid(
mg, 'land_surface_air__temperature',
colorbar_label='surface air\ntemperature (C$\degree$)',
shrink=0.4, cmap='bwr'
)
zc.run_one_step(dt)
se.run_one_step(dt)
# Create a map of all zones.
zone_map = np.zeros(mg.number_of_nodes)
for i, zone in enumerate(zc.zones):
zone_map[zone.mask] = i + 1
# Plot zone map.
title = 'zone count = {}'.format(len(zc.zones))
imshow_grid(mg, zone_map, cmap=cmap, allow_colorbar=False, plot_name=title, vmin=0.5)
se.taxa_data_frame
se.record_data_frame
# Create a plot of key model variables over time.
time = se.record_data_frame.time
temperature = [26, 31, 36]
n_zones = zc.record_data_frame.zones
n_species = se.record_data_frame.taxa
plt.plot(time, temperature, 'o-c', label='temperature (\N{DEGREE SIGN}C)')
plt.plot(time, n_zones, 's-m', label='zone count')
plt.plot(time, n_species, 'd-y', label='species count')
plt.xlabel('time')
plt.xticks(time)
plt.legend();
# Plot profile in map view.
endpoints = [15720, 15790]
profiler2 = Profiler(mg, endpoints)
profiler2.run_one_step()
profiler2.plot_profiles_in_map_view(
color='k', field=zone_map, cmap=cmap,
allow_colorbar=False, plot_name='Profile (trace in black)', vmin=1
)
# Plot profile.
fig, ax0 = plt.subplots()
profiler2.plot_profiles(
ylabel='Topographic Elevation (m)',
color='k',
title='Profile')
ax0.axis(ymin=1200, ymax=2600)
ax1 = ax0.twinx()
ax1.set_ylim(ax0.get_ylim())
ax1.set_yticklabels(lapse_rate * ax0.get_yticks() + temperature_at_base_level)
ax1.set_ylabel('Surface Air Temperature (\N{DEGREE SIGN}C)')
# Include species min elevation line.
z_min = (25 - temperature_at_base_level) / lapse_rate
ax0.plot([0, 3500], [z_min, z_min], 'c--', label='species minimum temperature')
ax0.legend();
imshow_grid(
mg, 'taxa__richness', colorbar_label='taxa richness',
shrink=0.4, cmap='viridis'
)
zc.record_data_frame
zc.record_data_frame.zones.max()
se.get_extant_taxon_objects()
se.get_extant_taxon_objects(time=0)
taxon_12 = se.get_extant_taxon_objects(tids=[12])[0]
taxon_12
mask = taxon_12.range_mask
imshow_grid(mg, mask, plot_name='taxon 12 range in white', allow_colorbar=False)
filtered_taxa = se.get_extant_taxon_objects(ancestor=0)
[t.tid for t in filtered_taxa]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare the grid
Step2: Create a grid field of air temperature at the land surface
Step3: Setup SpeciesEvolver and zones
Step4: View record_data_frame of our SpeciesEvolver instance. Time and other model variables are stored in this attribute that is a Pandas DataFrame. The first and second columns are the row number and model time, respectively. In the third column we see that 0 taxa exist. We have yet to introduce taxa.
Step6: About zones
Step7: To demonstrate the output of this function, True values are shaded white in the plot below. Zones will be created in the masked area (in white). Later we will see that two zones are created, one for each cluster of True values.
Step8: Initialize ZoneController
Step9: Above, the mask returned by the zone_func indicates where zones should be created.
Step10: Multiple instances of ZoneController may be used with the same instance of SpeciesEvolver. This is useful when you wish to model groups of taxa within the same grid where each group needs a different zone function, for instance, fish and bears.
Step11: The profile (north is on the left) also illustrates that the two zones are separated by the area around the main divide. This area is below the minimum zone temperature, therefore the ZoneController excluded zone creation here.
Step12: Introduce taxa to SpeciesEvolver
Step13: Two taxa now exist in SpeciesEvolver. In this notebook, each represents a vegetation species. In this model, nothing specific to vegetation species is needed beyond setting the zone extent by air temperature using the zone function, so the built-in functionality of ZoneTaxon is sufficient.
Step14: The SpeciesEvolver attribute, taxa_data_frame provides data about all of the tracked taxa, both extant and extinct. The data is presented in a Pandas DataFrame. Each row is data about a taxon.
Step15: tid is the identifier used to reference and retrieve Taxon objects, demonstrated later in this notebook. Taxa are assigned identifiers in the order they are introduced to SpeciesEvolver.
Step16: Here we advance ZoneController and SpeciesEvolver in time by one time step. ZoneController is advanced first in order to update the zones prior to calling SpeciesEvolver.
Step17: One zone now exists because the temperature increase raised the area that falls within the temperature range prescribed in zone_func.
Step18: A 'many-to-one' zone connectivity occurred in this time step relative to the previous time step, meaning neither speciation nor extinction was triggered by zone change. (See zone connectivity table above.)
Step19: Plot a profile across the main divide
Step20: Species were able to cross the main divide following the temperature increase. The two species now exist in the same area along the main divide. Coexisting has no effect because ZoneTaxon objects do not interact in the default implementation, although species can be made to influence each other by extending ZoneTaxon (see the documentation of this class).
Step21: Advance ZoneController and SpeciesEvolver, and plot zones.
Step22: The grid now contains more zones than the prior time step. The macroevolutionary effect of this one-to-many zone connectivity relationship is examined in the next section.
Step23: In the SpeciesEvolver record DataFrame, we see 20 speciations at time 2.
Step24: Next we will examine the model history using the plot below
Step25: Plot time 1 profile along the main divide
Step26: The profile trace crosses two zones (upper figure). The topography is above the elevation with the minimum species temperature for these two zones (lower figure).
Step27: Similar to SpeciesEvolver, ZoneController has a record_data_frame that stores model variables over time. The ZoneController stores variables pertinent to zones.
Step28: Pandas DataFrame methods can be used on record_data_frame. For example, here we get the maximum capture count.
Step29: Retrieve taxon objects
Step30: The taxa can be filtered by the time they existed. To do so, we can use the optional parameter, time to indicate we want the taxa extant at time 0, the two taxa introduced at the beginning of the model.
Step31: Get taxa by their identifier using the tid parameter. Here we request only one taxon, the one with a tid of 12. This method always returns a list so we index the first and only element in this list.
Step32: With a taxon object retrieved, we can work with it as we need. Here we plot its geographic range. The range_mask attribute of a taxon is a mask of its geographic range.
Step33: The taxa with a common ancestor can be retrieved with the ancestor parameter. Here we get the taxa that descended from taxon 0 and then print only the identifiers of these descendents.
|
6,215
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'sandbox-2', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
6,216
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import timeit
import warnings
from itertools import product
warnings.filterwarnings("ignore")
pd.options.mode.chained_assignment = None # default='warn'
from ipyleaflet import (
Map,
Marker,
TileLayer, ImageOverlay,
Polyline, Polygon, Rectangle, Circle, CircleMarker,
GeoJSON,
DrawControl
)
import matplotlib as mpl
import matplotlib.cm
import matplotlib.colors
import numpy as np
def n_colors(n, colormap=mpl.cm.Blues):
data = np.linspace(0.0,1.0,n)
c = [mpl.colors.rgb2hex(d[0:3]) for d in colormap(data)]
return c
def data_to_colors(data, colormap=mpl.cm.Blues):
c = [mpl.colors.rgb2hex(d[0:3]) for d in colormap(mpl.colors.Normalize()(data))]
return c
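# Usage note (illustrative): n_colors(n) samples n evenly spaced hex colors from
# the given colormap, while data_to_colors first normalizes its input to [0, 1]
# and then maps each value to a hex color, so larger values get darker blues.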
# The function returns the region code for the given coordinates
# (0 when the point falls in no region)
def get_region(lon,lat):
r=0
r=regions.loc[(regions.west<lon)&
(regions.east>lon)&
(regions.south<lat)&
(regions.north>lat)
,'region'].values
if r.shape[0]==0:
return 0
return r[0]
# load the region data
regions=pd.read_csv('g:/python_kurs/kurs6/taxi/regions.csv',sep=';')
regions.head(6)
# load the aggregated data for May 2016
data=pd.read_csv('g:/python_kurs/kurs6/taxi/data_05_2016.csv')
data.head(6)
data_region=data.groupby(by=['region_pickup'],axis=0)[['vcount']].sum().reset_index()
print 'Number of cells from which no trips were made in May ',len(data_region[data_region.vcount==0])
data_region.head()
# Coordinates of the Empire State Building
lat = 40.748445
lon = -73.985664
region = get_region(lon,lat)
print region
center = [lat, lon]
zoom = 20
mark = Marker(location=center)
# Draw a static map of New York. Put a marker where the Empire State Building is located.
m = Map(center=center, zoom=zoom)
m += mark
m.dragging=False
m.double_click_zoom=False
m.keyboard=False
m.tap=False
m.touch_zoom=False
m.zoom_control=False
m
zoom = 10
m = Map(default_tiles=TileLayer(opacity=1.0), center=center, zoom=zoom)
m += mark
m.dragging=False
m.double_click_zoom=False
m.keyboard=False
m.tap=False
m.touch_zoom=False
m.zoom_control=False
colors1 = data_to_colors(data_region['vcount'])
%%time
dr=data_region['region_pickup']
dr=np.arange(1,2500,1)
for region, color in zip(dr, colors1):
#print region,color
r=regions.loc[regions.region==region]
bounds = [(r.north.values[0],r.west.values[0]),
(r.north.values[0],r.east.values[0]),
(r.south.values[0],r.east.values[0]),
(r.south.values[0],r.west.values[0])
]
#print r,bounds
pg = Polygon(locations=bounds, weight=3,
color=color, opacity=0.8, fill_opacity=0.5,
fill_color=color)
m += pg
m
# Coordinates of the Statue of Liberty
lat = 40.689237
lon = -74.044521
region = get_region(lon,lat)
print region
center = [lat, lon]
zoom = 15
mark = Marker(location=center)
m = Map(default_tiles=TileLayer(opacity=1.0), center=center, zoom=zoom)
m
m += mark
zoom = 10
m = Map(default_tiles=TileLayer(opacity=1.0), center=center, zoom=zoom)
m
m += mark
data_r_mean=data.groupby(by=['region_pickup'],axis=0)[['vcount']].mean().reset_index()
data_r_mean.head()
print data_r_mean[data_r_mean.region_pickup==1231]
data_rm=data_r_mean.loc[data_r_mean.vcount>=5]
print 'Number of cells from which on average 5 or more trips per hour were made in May ', len(data_rm)
colors1 = data_to_colors(data_rm['vcount'])
%%time
dr=data_rm['region_pickup']
#dr=np.arange(1,2500,1)
for region, color in zip(dr, colors1):
#print region,color
r=regions.loc[regions.region==region]
bounds = [(r.north.values[0],r.west.values[0]),
(r.north.values[0],r.east.values[0]),
(r.south.values[0],r.east.values[0]),
(r.south.values[0],r.west.values[0])
]
#print r,bounds
pg = Polygon(locations=bounds, weight=3,
color=color, opacity=0.8, fill_opacity=0.5,
fill_color=color)
m += pg
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sum the total number of taxi trips from each geographic zone and count the cells from which no trips were made in May.
Step2: On top of a static map of New York, visualize the trip data for each cell so that the color scale used to shade each cell shows the total number of taxi trips originating from it.
Step3: Insert an interactive map of New York, one that can be panned and zoomed. Place a marker where the Statue of Liberty is located.
Step4: On the interactive map of New York, draw the cells so that their color shows the average number of taxi trips per hour from that zone over the month.
|
6,217
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from matplotlib import pyplot as plt
# Number of row and columns in the matrix
nrows = 200
# Number of maximum iterations
maxiters = 500
# Radius of neighbors that affect the value of the current cell (radius of 1 means that only the 8 cells immediately
# adjacent have influence)
radius = 1
# How many steps of the evolution of the system behavior will be shown
nshows = 6
# How do we handle the matrix borders: 'fix' keeps the values on first and last rows and columns constant,
# 'wrap' wraps the first and last rows and columns around
borders='fix'
def makematrix(nrows):
listrand = [] #the empty list will contain all the values for the initial matrix
for i in range(nrows*nrows):
listrand.append(np.random.randint(0,2)) # By changing the 2 you allow for the existence of more states
# Turn the list into a square matrix
listrand = np.matrix(listrand)
listrand.shape=(nrows,nrows)
return listrand
def showmatrix(listrand, nrows, i):
plt.imshow(listrand) # imshow() displays the matrix values as a grid
plt.xticks([0, nrows])
plt.yticks([0, nrows])
plt.title('Iteration: ' + str(i))
plt.show()
return
def systemsym(nrows, maxiters, radius, nshows, borders):
# Variable that holds the while condition
cellschanged = 1
# Initialize iteration counter
niters = 0
# Create a 3D matrix of size maxiters * nrows * nrows that will hold the evolution of the system
# each nrows * nrows matrix will hold the values of the matrix at a single iteration
matrixevo = np.zeros((maxiters,nrows,nrows))
# The first 2D matrix in matrixevo is initialized with random assignation of each cell to a given group
# by invoking the makematrix() function
matrixevo[0]=makematrix(nrows)
# Because we don't know how many iterations the system needs to undergo before reaching convergence we will
# perform the updates within a while loop that depends on the value of the cellschanged variable
while cellschanged>0:
niters= niters + 1
# Start with a matrix which is a copy of the one from the previous time step and then update each cell
matrixevo[niters] = np.copy(matrixevo[niters-1])
# Cycle through each cell in the current matrix to determine its new value, but the start and end places
# to cycle will depend on the way the borders are handled
cycle_handler = {'wrap':0, 'fix':radius}
for row in range(cycle_handler[borders] , nrows-cycle_handler[borders]):
for column in range(cycle_handler[borders] , nrows-cycle_handler[borders]):
# This section is where the update algorithm is defined, change it to your specific model
# Currently it just runs a majority vote based on the local neighbors depending on the radius value
votes = 0
for deltarow in np.arange(-radius, radius+1):
for deltacolumn in np.arange(-radius, radius+1):
votes = votes + matrixevo[niters-1 , (row+deltarow)%nrows , (column+deltacolumn)%nrows]
# Then update the value of the current cell based on the number of votes
# The number of neighboring cells evaluated depends on the radius
ncells_eval = (2*radius + 1)**2
if votes>np.floor(ncells_eval/2):
matrixevo[niters,row,column]=1
else:
matrixevo[niters,row,column]=0
# We only want to iterate until the cells stop changing value or we reach the maximum number of iterations
# calculate how many cells changed value, if any did cellschanged will remain higher than 0
cellschanged = np.abs(np.sum(np.sum(matrixevo[niters]-matrixevo[niters-1])))
# Check if we reached the maximum number of iterations
if niters==maxiters:
cellschanged=0
print('Finished after ', niters, ' iterations')
# Call the showmatrices() function to display the evolution of the system (see function definition below)
showmatrices(matrixevo, nrows, niters, nshows)
return matrixevo, niters
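# Note on the update rule above: `votes` sums the neighborhood values including
# the cell itself, so with radius=1 a cell becomes 1 only when more than 4 of
# the 9 cells in its 3x3 neighborhood were 1 at the previous step (a strict
# majority vote).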
def showmatrices(matrixevo, nrows, niters, nshows):
# calculate what step size would you need for the desired amount of displays
# If the simulation converged in less steps that the maximum amount of steps we want to show, then show them all
if niters < nshows:
for i in range(niters+1):
showmatrix(matrixevo[i], nrows, i)
else:
_ , step_size = np.linspace(0, niters, nshows, retstep=True)
step_size = np.ceil(step_size) # step_size needs to be an integer and can't be 0
for i in range(0, niters+1, int(step_size)):
showmatrix(matrixevo[i], nrows, i)
# add the final step, which is usually not included in the stepping counting
if i<niters:
showmatrix(matrixevo[niters], nrows, niters)
return
matrixevo1, niters1 = systemsym(nrows, maxiters, radius, nshows, borders)
borders='wrap'
matrixevo2, niters2 = systemsym(nrows, maxiters, radius, nshows, borders)
matrixevo1[10] # 2D matrix with the state of the system at the 10th time step
matrixevo2[niters2,2,:]
matrixevo1[0,0,:]-matrixevo1[niters1,0,:]
matrixevo2[0,0,:]-matrixevo2[niters2,0,:]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Parameters
Step2: The <code>makematrix()</code> function takes as argument the number of rows and columns of the matrix to model
Step3: The <code>showmatrix()</code> function takes as arguments
Step4: Simulation Function
Step5: The <code>showmatrices()</code> function takes as arguments
Step6: Running the model
Step7: Because the 3D matrix with all simulated steps was returned, it is possible to access an individual time step
Step8: The general format to access specific rows and columns in a time step is
Step9: We can check that in the case of fixed borders the first column didn't change between the initial and last iteration
Step10: While that is not the case for the wrapped borders
|
6,218
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd # pandas for handling mixed data sets
import numpy as np # numpy for basic math and matrix operations
# create a data frame containing variables of disparate scale
scratch_df = pd.DataFrame({'x1': pd.Series(np.random.choice(1000, 20)),
'x2': pd.Series(np.random.choice(20, 20))})
scratch_df
# create a deep copy
# so this cell can be run many times w/o error
scratch_df1 = scratch_df.copy()
# loop through columns
# create new column
# apply z-score formula to new column
for col_name in scratch_df.columns:
new_col_name = col_name + '_std'
scratch_df1[new_col_name] = (scratch_df[col_name] - scratch_df[col_name].mean())/scratch_df[col_name].std()
# new variables are on the same scale
scratch_df1
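# The same z-scores can be computed without an explicit loop; this vectorized
# form (an equivalent sketch, using pandas broadcasting) gives identical values:
scratch_df_std = (scratch_df - scratch_df.mean()) / scratch_df.std()
scratch_df_std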
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create sample data set
Step2: Standardize
|
6,219
|
<ASSISTANT_TASK:>
Python Code:
def resp_elas(m,c,k, cC,cS,w, F, x0,v0):
wn2 = k/m ; wn = sqrt(wn2) ; beta = w/wn
z = c/(2*m*wn)
wd = wn*sqrt(1-z*z)
# xi(t) = R sin(w t) + S cos(w t) + D
det = (1.-beta**2)**2+(2*beta*z)**2
R = ((1-beta**2)*cS + (2*beta*z)*cC)/det/k
S = ((1-beta**2)*cC - (2*beta*z)*cS)/det/k
D = F/k
A = x0-S-D
B = (v0+z*wn*A-w*R)/wd
def x(t):
return exp(-z*wn*t)*(A*cos(wd*t)+B*sin(wd*t))+R*sin(w*t)+S*cos(w*t)+D
def v(t):
return (-z*wn*exp(-z*wn*t)*(A*cos(wd*t)+B*sin(wd*t))
+wd*exp(-z*wn*t)*(B*cos(wd*t)-A*sin(wd*t))
+w*(R*cos(w*t)-S*sin(w*t)))
return x,v
def resp_yield(m,c, cC,cS,w, F, x0,v0):
# csi(t) = R sin(w t) + S cos(w t) + Q t
Q = F/c
# In this case the equation of motion is
#     m x'' + c x' = P(t),
# the homogeneous response is
#     x(t) = A exp(-c t / m) + B,
# and the particular integral, for a load described as in the previous case,
# is (slightly different...)
#     xi(t) = S cos(w t) + R sin(w t) + D t,
# where D = F/c is written as Q in the code. Having computed R, S and D from
# substituting xi in the equation of motion, and A and B by imposing the
# initial conditions, we can define the displacement and velocity functions
# and, finally, return these two functions to the caller.
det = w**2*(c**2+w**2*m**2)
R = (+w*c*cC-w*w*m*cS)/det
S = (-w*c*cS-w*w*m*cC)/det
# x(t) = A exp(-c t/m) + B + R sin(w t) + S cos(w t) + Q t
# v(t) = - c A/m exp(-c t/m) + w R cos(w t) - w S sin(w t) + Q
#
# v(0) = -c A / m + w R + Q = v0
A = m*(w*R + Q - v0)/c
# x(0) = A + B + S = x0
B = x0 - A - S
def x(t):
return A*exp(-c*t/m)+B+R*sin(w*t)+S*cos(w*t)+Q*t
def v(t):
return -c*A*exp(-c*t/m)/m+w*R*cos(w*t)-w*S*sin(w*t)+Q
return x,v
def bisect(f,val,x0,x1):
h = (x0+x1)/2.0
fh = f(h)-val
if abs(fh)<1e-8 : return h
f0 = f(x0)-val
if f0*fh > 0 :
return bisect(f, val, h, x1)
else:
return bisect(f, val, x0, h)
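# Illustrative check of the bisection helper (it assumes f(x0)-val and f(x1)-val
# bracket a sign change): solving t**2 == 2 on [0, 2] converges to sqrt(2),
# i.e. bisect(lambda t: t*t, 2.0, 0.0, 2.0) -> 1.41421...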
mass = 1000. # kg
k = 40000. # N/m
zeta = 0.03 # damping ratio
fy = 2500. # N
print('Limit displacement Uy =', fy*1000/k, 'mm')
damp = 2*zeta*sqrt(k*mass)
xy = fy/k # m
t1 = 0.3 # s
w = pi/t1 # rad/s
Po = 6000. # N
x0=0.0 # m
v0=0.0 # m/s
x_next, v_next = resp_elas(mass,damp,k, 0.0,Po,w, 0.0, x0,v0)
t_yield = bisect(x_next, xy, 0.0, t1)
print(t_yield, x_next(t_yield)*k)
t_el = linspace( 0.0, t_yield, 201)
x_el = vectorize(x_next)(t_el)
v_el = vectorize(v_next)(t_el)
# ------------------------------
figure(0)
plot(t_el,x_el,
(0,0.25),(xy,xy),'--b',
(t_yield,t_yield),(0,0.0699),'--b')
title("$x_{el}(t)$")
xlabel("Time, s")
ylabel("Displacement, m")
# ------------------------------
figure(1)
plot(t_el,v_el)
title("$\dot x_{el}(t)$")
xlabel("Time, s")
ylabel("Velocity, m/s")
x0=x_next(t_yield)
v0=v_next(t_yield)
print(x0, v0)
cS = Po*cos(w*t_yield)
cC = Po*sin(w*t_yield)
print(Po*sin(w*0.55), cS*sin(w*(0.55-t_yield))+cC*cos(w*(0.55-t_yield)))
x_next, v_next = resp_yield(mass, damp, cC,cS,w, -fy, x0,v0)
t_y1 = linspace(t_yield, t1, 101)
x_y1 = vectorize(x_next)(t_y1-t_yield)
v_y1 = vectorize(v_next)(t_y1-t_yield)
figure(3)
plot(t_el,x_el, t_y1,x_y1,
(0,0.25),(xy,xy),'--b',
(t_yield,t_yield),(0,0.0699),'--b')
xlabel("Time, s")
ylabel("Displacement, m")
# ------------------------------
figure(4)
plot(t_el, v_el, t_y1, v_y1)
xlabel("Time, s")
ylabel("Velocity, m/s")
x0 = x_next(t1-t_yield)
v0 = v_next(t1-t_yield)
print(x0, v0)
x_next, v_next = resp_yield(mass, damp, 0, 0, w, -fy, x0, v0)
t2 = t1 + bisect( v_next, 0.0, 0, 0.3)
print(t2)
t_y2 = linspace( t1, t2, 101)
x_y2 = vectorize(x_next)(t_y2-t1)
v_y2 = vectorize(v_next)(t_y2-t1)
print(x_next(t2-t1))
figure(5)
plot(t_el,x_el, t_y1,x_y1, t_y2, x_y2,
(0,0.25),(xy,xy),'--b',
(t_yield,t_yield),(0,0.0699),'--b')
xlabel("Time, s")
ylabel("Displacement, m")
# ------------------------------
figure(6)
plot(t_el, v_el, t_y1, v_y1, t_y2, v_y2)
xlabel("Time, s")
ylabel("Velocity, m/s")
x0 = x_next(t2-t1) ; v0 = 0.0
x_next, v_next = resp_elas(mass,damp,k, 0.0,0.0,w, k*x0-fy, x0,v0)
t_e2 = linspace(t2,4.0,201)
x_e2 = vectorize(x_next)(t_e2-t2)
v_e2 = vectorize(v_next)(t_e2-t2)
# ------------------------------
figure(7) ;
plot(t_el, x_el, '-b',
t_y1, x_y1, '-r',
t_y2, x_y2, '-r',
t_e2, x_e2, '-b',
(0.6, 4.0), (x0-xy, x0-xy), '--y')
title("In blue: elastic phases.\n"+
"In red: yielding phases.\n"+
"Dashed: permanent plastic deformation.")
xlabel("Time, s")
ylabel("Displacement, m")
def make_p(p0,t1):
"""make_p(p0, t1) returns a 1/2-sine impulse load function, p(t)."""
def p(t):
"""Half-sine impulse: p0*sin(pi*t/t1) for t < t1, zero afterwards."""
if t<t1:
return p0*sin(t*pi/t1)
else:
return 0.0
return p
def make_kt(k,fy):
"make_kt(k,fy) returns a function kt(u,v,up) returning kt, up"
def kt(u,v,up):
f=k*(u-up)
if (-fy)<f<fy: return k,up
if fy<=f and v>0: up=u-uy;return 0,up
if fy<=f and v<=0: up=u-uy;return k,up
if f<=(-fy) and v<0: up=u+uy;return 0,up
else: up=u+uy;return k,up
return kt
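# Reading of the rules in kt (a bilinear elastic-perfectly-plastic spring):
# inside the elastic range |f| < fy the tangent stiffness is k and up is kept;
# at the yield limit up is reset so that the spring force equals +/-fy, with
# tangent stiffness 0 while loading continues and k upon velocity reversal.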
# Exercise from lesson 04
#
mass = 1000.00 # kilograms
k = 40000.00 # Newtons per metre
zeta = 0.03 # zeta is the damping ratio
fy = 2500.00 # yelding force, Newtons
t1 = 0.30 # half-sine impulse duration, seconds
p0 = 6000.00 # half-sine impulse peak value, Newtons
uy = fy/k # yelding displacement, metres
# using the above constants, define the loading function
p=make_p(p0,t1)
# the following function, given the final displacement, the final
# velocity and the initial plastic deformation returns a) the tangent
# stiffness b) the final plastic deformation
kt=make_kt(k,fy)
# we need the damping coefficient "c", to compute its value from the
# damping ratio we must first compute the undamped natural frequency
wn=sqrt(k/mass) # natural frequency of the undamped system
damp=2*mass*wn*zeta # the damping coefficient
# the time step
h=0.005
# required duration for the response
t_end = 4.0
# the number of time steps to arrive at t_end
nsteps=int((t_end+h/100)/h)+1
# the maximum number of iterations in the Newton-Raphson procedure
maxiters = 30
# using the constant acceleration algorithm
# below we define the relevant algorithmic constants
gamma=0.5
beta=1./4.
gb=gamma/beta
a=mass/(beta*h)+damp*gb
b=0.5*mass/beta+h*damp*(0.5*gb-1.0)
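# With gamma = 1/2 and beta = 1/4 this is the constant-average-acceleration
# member of the Newmark family, unconditionally stable for linear systems.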
t0=0.0
u0=0.0
up=0.0
v0=0.0
p0=p(t0)
(k0, up)=kt(u0,v0,up)
a0=(p0-damp*v0-k0*(u0-up))/mass
time = []; disp = []
for i in range(nsteps):
time.append(t0); disp.append(u0)
# advance time, next external load value, etc
t1 = t0 + h
p1 = p(t1)
Dp = p1 - p0
Dp_= Dp + a*v0 + b*a0
k_ = k0 + gb*damp/h + mass/(beta*h*h)
# We prepare the machinery for the modified Newton-Raphson algorithm.
# If we have no state change in the time step N-R algorithm is equivalent to the standard procedure
u_init=u0; v_init=v0 # initial state
f_spring=k*(u0-up) # the force in the spring
DR=Dp_ # the unbalanced force, initially equal to the external load increment
for j in range(maxiters):
Du=DR/k_ # the disp increment according to the initial stiffness
u_next = u_init + Du
v_next = v_init + gb*Du/h - gb*v_init + h*(1.0-0.5*gb)*a0
# we are interested in the total plastic elongation
oops,up=kt(u_next,v_next,up)
# because we need the spring force at the end of the time step
f_spring_next=k*(u_next-up)
# so that we can compute the fraction of the incremental force
# that's equilibrated at the end of the time step
df=f_spring_next-f_spring+(k_-k0)*Du
# and finally the incremental forces still unbalanced the end of the time step
DR=DR-df
# finish updating the system state
u_init=u_next; v_init=v_next; f_spring=f_spring_next
# if the unbalanced load is small enough (the criteria used in practical
# programs are energy based) exit the loop - if we
# have no plasticization/unloading DR==0 at the end of the first iteration
if abs(DR)<fy*1E-6: break
# now the load increment is balanced by the spring force and increments in inertial and damping forces,
# we need to compute the full state at the end of the time step, and to change all denominations
# to reflect the fact that we are starting a new time step.
Du=u_init-u0
Dv=gamma*Du/(beta*h)-gamma*v0/beta+h*(1.0-0.5*gamma/beta)*a0
u1=u0+Du ; v1=v0+Dv
k1,up=kt(u1,v1,up)
a1=(p(t1)-damp*v1-k*(u1-up))/mass
t0=t1; v0=v1; u0=u1 ; a0=a1 ; k0=k1 ; p0=p1
figure(8)
plot(time[::4],disp[::4],'xr')
plot(t_el, x_el, '-b',
t_y1, x_y1, '-r',
t_y2, x_y2, '-r',
t_e2, x_e2, '-b',
(0.6, 4.0), (x0-xy, x0-xy), '--y')
title("Continuous line: exact response.\n"+
"Red crosses: constant acceleration + MNR.\n")
xlabel("Time, s")
ylabel("Displacement, m");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plastic response
Step2: A utility function
Step3: The system parameters
Step4: Derived quantities
Step5: Load definition
Step6: The actual computations
Step7: Yielding time is
Step8: Forced response in elastic range is
Step9: Preparing for EP response
Step10: now, the load must be expressed in function of a restarted time,
Step11: Now we generate the displacement and velocity functions for the yielded phase,
Step12: At this point I must confess that I have already peeked at the numerical solution,
Step13: In the next phase, still it is $\dot x> 0$ so that
Step14: Elastic unloading
Step15: now we are ready to plot the whole response
Step17: Numerical solution
Step18: and also a function that, given the displacement,
Step19: Problem data
Step20: Initialize the algorithm
Step21: System state initialization
Step22: Iteration
Step23: Plotting our results
|
6,220
|
<ASSISTANT_TASK:>
Python Code:
a = 3
print(type(a))
b = [1, 2.5, 'This is a string']
print(type(b))
c = 'Hello world!'
print(type(c))
a = [1, 2, 3, 4]
print('This is the zeroth value in the list: {}'.format(a[0]))
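# Python also supports negative indices, which count from the end of the list:
print('This is the last value in the list: {}'.format(a[-1]))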
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you come from a background of matlab, remember that indexing in python starts at 0.
|
6,221
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
def plot_all(m, d, m_est, d_pred):
"""Helper function for plotting. You can ignore this."""
fig = plt.figure(figsize=(10,6))
ax0 = fig.add_subplot(2,2,1)
ax0.imshow(m, cmap='viridis', aspect='auto')
ax0.set_title("$\mathrm{Model}\ m$")
ax1 = fig.add_subplot(2,2,2)
ax1.imshow(d, cmap='viridis', aspect='auto')
ax1.set_title("$\mathrm{Data}\ d$")
ax2 = fig.add_subplot(2,2,3)
ax2.imshow(m_est, cmap='viridis', aspect='auto')
ax2.set_title("$\mathrm{Estimated\ model}\ m_\mathrm{est}$")
ax3 = fig.add_subplot(2,2,4)
ax3.imshow(d_pred, cmap='viridis', aspect='auto')
ax3.set_title("$\mathrm{Predicted\ data}\ d_\mathrm{pred}$")
plt.show()
import scipy.linalg
def convmtx(h, n):
"""Equivalent of MATLAB's convmtx function, http://www.mathworks.com/help/signal/ref/convmtx.html.
Makes the convolution matrix, C. The product C.x is the convolution of h and x.
Args:
h (ndarray): a 1D array, the kernel.
n (int): the number of rows to make.
Returns:
ndarray of shape (n, len(h)+n-1).
"""
col_1 = np.r_[h[0], np.zeros(n-1)]
row_1 = np.r_[h, np.zeros(n-1)]
return scipy.linalg.toeplitz(col_1, row_1)
M = 50
m = np.zeros((M+1, 1)) # Have to do +1 because we're going to lose one in computing RC series.
m[10:15,:] = 1.0
m[15:27,:] = -0.3
m[27:35,:] = 2.1
m = (m[1:] - m[:-1]) / (m[1:] + m[:-1] + 1e-9) # Small number to avoid division by zero.
m = np.repeat(m, 50, axis=-1)
plt.imshow(m, cmap='viridis')
N = 20
L = 100
alpha = 0.08
x = np.arange(0, M, 1) * L/(M-1)
dx = L/(M-1)
r = np.arange(0, N, 1) * L/(N-1)
G = np.zeros((N, M))
for j in range(M):
for k in range(N):
G[k,j] = dx * np.exp(-alpha * np.abs(r[k] - x[j])**2)
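# The same kernel can be built without the double loop via broadcasting
# (an equivalent, vectorized sketch):
# G = dx * np.exp(-alpha * np.abs(r[:, None] - x[None, :])**2)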
d = G @ m
plt.imshow(d, cmap='viridis')
m_est = G.T @ la.inv(G @ G.T) @ d
d_pred = G @ m_est
plot_all(m, d, m_est, d_pred)
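# Note: G.T @ inv(G @ G.T) is the minimum-norm (right) pseudoinverse, defined
# when G has full row rank; np.linalg.pinv(G) @ d would give the same estimate.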
m_est = la.lstsq(G, d)[0]
d_pred = G @ m_est
plot_all(m, d, m_est, d_pred)
from io import BytesIO
import requests
from urllib.parse import quote
text = '+ ='
url = "https://chart.googleapis.com/chart"
params = {'chst': 'd_text_outline',
'chld': '000000|36|h|000000|_|{}'.format(text),
}
r = requests.get(url, params)
b = BytesIO(r.content)
img = mpimg.imread(b)
m = np.pad(img[...,3], 20, 'constant')
plt.imshow(m, cmap='viridis')
m = (m[1:] - m[:-1]) / (m[1:] + m[:-1] + 1e-9) # Small number to avoid division by zero.
M = m.shape[0]
N = 20
L = 100
alpha = 0.08
x = np.arange(0, M, 1) * L/(M-1)
dx = L/(M-1)
r = np.arange(0, N, 1) * L/(N-1)
G = np.zeros((N, M))
for j in range(M):
for k in range(N):
G[k,j] = dx * np.exp(-alpha * np.abs(r[k] - x[j])**2)
d = G @ m
plt.imshow(d, cmap='viridis')
m_est = G.T @ la.inv(G @ G.T) @ d
d_pred = G @ m_est
plot_all(m, d, m_est, d_pred)
m_est = la.lstsq(G, d)[0]
d_pred = G @ m_est
plot_all(m, d, m_est, d_pred)
s = 1
d += s * np.random.random(d.shape)
I = np.eye(N)
µ = 2.5
m_est = G.T @ la.inv(G @ G.T + µ * I) @ d
d_pred = G @ m_est
plot_all(m, d, m_est, d_pred)
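# The damping parameter µ trades data fit against model norm: as µ -> 0 this
# reduces to the unregularized minimum-norm solution, while larger µ suppresses
# noise amplification at the cost of a smoother, smaller-norm estimate.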
W = convmtx([1,-1], M)[:,:-1] # Skip last column
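# Each row of W computes a first difference m[i] - m[i+1] (apart from the last,
# boundary row), so the µ * W.T @ W term below penalizes roughness of the model.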
m_est = la.inv(G.T @ G + µ * W.T @ W) @ G.T @ d
d_pred = G @ m_est
plot_all(m, d, m_est, d_pred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Linear inverse solutions in NumPy
Step3: Mauricio's 1D problem in 2D
Step4: Form the discrete kernel, G.
Step5: Compute the data; this is the forward problem.
Step6: Minimum least squares solution
Step7: We can also just use NumPy's solver directly.
Step8: More complex models
Step9: I changed the shape of m so we have to make a new G.
Step10: Forward model the data.
Step11: Noise-free
Step12: Solve with LAPACK
Step13: With noise
Step14: With noise
Step15: Now we solve
|
6,222
|
<ASSISTANT_TASK:>
Python Code:
import logging # python logging module
# basic format for logging
logFormat = "%(asctime)s - [%(levelname)s] (%(funcName)s:%(lineno)d) %(message)s"
# logs will be stored in tweepytrends.log
logging.basicConfig(filename='tweepytrends.log', level=logging.INFO,
format=logFormat, datefmt="%Y-%m-%d %H:%M:%S")
import tweepy # importing all the modules required
import socket # will be used to create sockets
import json # manipulate json
from httplib import IncompleteRead # Python 2 module; on Python 3 use: from http.client import IncompleteRead
# Keep these tokens secret, as anyone can have full access to your
# twitter account, using these tokens
consumerKey = "#"
consumerSecret = "#"
accessToken = "#-#"
accessTokenSecret = "#"
# Performing the authentication and authorization, post this step
# we will have full access to twitter api's
def connectToTwitter():
"""Connect to twitter."""
try:
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
logging.info("Successfully logged in to twitter.")
return api, auth
except Exception as e:
logging.info("Something went wrong in oauth, please check your tokens.")
logging.error(e)
# Tweet listner class which subclasses from tweepy.StreamListener
class TweetListner(tweepy.StreamListener):
"""Twitter stream listener."""
def __init__(self, csocket):
self.clientSocket = csocket
def on_data(self, raw_data):
"""Called when raw data is received from connection.
Return False to stop stream and close connection."""
try:
msg = json.loads(raw_data)
self.clientSocket.send(msg['text'].encode('utf-8'))
return True
except Exception as e:
logging.error("An unhandled exception has occured, check your data processing")
logging.error(e)
raise e
def on_error(self, status_code):
"""Called when a non-200 status code is returned."""
logging.error("A non-200 status code is returned")
return True
def on_exception(self, exception):
"""Called when an unhandled exception occurs."""
logging.error("An unhandled exception has occurred")
raise exception
# Creating a proxy socket
def createProxySocket(host, port):
"""Returns a socket which can be used to connect to Spark."""
try:
s = socket.socket() # initialize socket instance
s.bind((host, port)) # bind to the given host and port
s.listen(5) # Enable a server to accept connections.
logging.info("Listening on the port {}".format(port))
cSocket, address = s.accept() # waiting for a connection
logging.info("Received Request from: {}".format(address))
return cSocket
except socket.error as e:
if e.errno == socket.errno.EADDRINUSE: # Address in use
logging.error("The given host:port {}:{} is already in use"\
.format(host, port))
logging.info("Trying on port: {}".format(port + 1))
return createProxySocket(host, port + 1)
raise # re-raise any other socket error instead of silently returning None
if __name__ == "__main__":
try:
api, auth = connectToTwitter() # connecting to twitter
host = "localhost"
port = 8878
cSocket = createProxySocket(host, port) # Creating a socket
while True:
try:
# Connect/reconnect the stream
tweetStream = tweepy.Stream(auth, TweetListner(cSocket)) # Stream the twitter data
# DON'T run this approach async or you'll just create a ton of streams!
tweetStream.filter(track=["Iphone", "iphone"]) # Filter on trending topics
except IncompleteRead:
# Oh well, reconnect and keep trucking
continue
except KeyboardInterrupt:
# Or however you want to exit this loop
tweetStream.disconnect()
break
except Exception as e:
logging.error("Unhandled exception has occured inside while")
logging.error(e)
continue
except KeyboardInterrupt: # Keyboard interrupt called
logging.error("KeyboardInterrupt was hit")
except Exception as e:
logging.error("Unhandled exception has occured")
logging.error(e)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authentication and Authorisation
Step3: Post this step, we will have full access to twitter api's
Step9: Streaming with tweepy
|
6,223
|
<ASSISTANT_TASK:>
Python Code:
#@title
# Copyright 2020 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Install and import Trax
!pip install -q -U git+https://github.com/google/trax@master
import os
import numpy as np
import trax
# Use the tensorflow-numpy backend.
trax.fastmath.set_backend('tensorflow-numpy')
print(trax.fastmath.backend_name())
# Create data streams.
train_stream = trax.data.TFDS('imdb_reviews', keys=('text', 'label'), train=True)()
eval_stream = trax.data.TFDS('imdb_reviews', keys=('text', 'label'), train=False)()
data_pipeline = trax.data.Serial(
trax.data.Tokenize(vocab_file='en_8k.subword', keys=[0]),
trax.data.Shuffle(),
trax.data.FilterByLength(max_length=2048, length_keys=[0]),
trax.data.BucketByLength(boundaries=[ 32, 128, 512, 2048],
batch_sizes=[512, 128, 32, 8, 1],
length_keys=[0]),
trax.data.AddLossWeights()
)
train_batches_stream = data_pipeline(train_stream)
eval_batches_stream = data_pipeline(eval_stream)
# Print example shapes.
example_batch = next(train_batches_stream)
print(f'batch shapes = {[x.shape for x in example_batch]}')
# Create the model.
from trax import layers as tl
model = tl.Serial(
tl.Embedding(vocab_size=8192, d_feature=256),
tl.Mean(axis=1), # Average on axis 1 (length of sentence).
tl.Dense(2), # Classify 2 classes.
)
# You can print model structure.
print(model)
# Train the model.
from trax.supervised import training
# Training task.
train_task = training.TrainTask(
labeled_data=train_batches_stream,
loss_layer=tl.WeightedCategoryCrossEntropy(),
optimizer=trax.optimizers.Adam(0.01),
n_steps_per_checkpoint=500,
)
# Evaluation task.
eval_task = training.EvalTask(
labeled_data=eval_batches_stream,
metrics=[tl.WeightedCategoryCrossEntropy(), tl.WeightedCategoryAccuracy()],
n_eval_batches=20 # For less variance in eval numbers.
)
# Training loop saves checkpoints to output_dir.
output_dir = os.path.expanduser('~/output_dir/')
training_loop = training.Loop(model,
train_task,
eval_tasks=[eval_task],
output_dir=output_dir)
# Run 2000 steps (batches).
training_loop.run(2000)
# Run on an example.
example_input = next(eval_batches_stream)[0][0]
example_input_str = trax.data.detokenize(example_input, vocab_file='en_8k.subword')
print(f'example input_str: {example_input_str}')
sentiment_activations = model(example_input[None, :]) # Add batch dimension.
print(f'Model returned sentiment activations: {np.asarray(sentiment_activations)}')
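# Hedged addition (not in the original): convert the activations to a class
# label; for imdb_reviews index 0 is negative and 1 is positive sentiment.
predicted_class = int(np.argmax(np.asarray(sentiment_activations)[0]))
print(f'predicted sentiment: {"positive" if predicted_class == 1 else "negative"}')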
# Convert the model into a Keras layer, use the weights from model.
keras_layer = trax.AsKeras(model)
print(keras_layer)
# Run the Keras layer to verify it returns the same result.
sentiment_activations = keras_layer(example_input[None, :])
print(f'Keras returned sentiment activations: {np.asarray(sentiment_activations)}')
import tensorflow as tf
# Create a full Keras model using the layer from Trax.
inputs = tf.keras.Input(shape=(None,), dtype='int32')
hidden = keras_layer(inputs)
# You can add other Keras layers here operating on hidden.
outputs = hidden
keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
print(keras_model)
# Run the Keras model to verify it returns the same result.
sentiment_activations = keras_model(example_input[None, :])
print(f'Keras returned sentiment activations: {np.asarray(sentiment_activations)}')
# Save the Keras model to output_dir.
model_file = os.path.join(output_dir, "model_checkpoint")
keras_model.save(model_file)
# Load the model from SavedModel.
loaded_model = tf.keras.models.load_model(model_file)
# Run the loaded model to verify it returns the same result.
sentiment_activations = loaded_model(example_input[None, :])
print(f'Keras returned sentiment activations: {np.asarray(sentiment_activations)}')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here is how you can set the fastmath backend to tensorflow-numpy and verify that it's been set.
Step2: 2. Convert Trax to Keras
Step3: 3. Exporting Trax Models for Deployment
|
6,224
|
<ASSISTANT_TASK:>
Python Code:
from statistics import mean
def occ(n):
"The expected occupancy for a row of n houses (under misanthrope rules)."
return (0 if n == 0 else
1 if n == 1 else
mean(occ(L) + 1 + occ(R)
for (L, R) in runs(n)))
def runs(n):
"""A list [(L, R), ...] where the i-th tuple contains the lengths of the runs
of acceptable houses to the left and right of house i."""
return [(max(0, i - 1), max(0, n - i - 2))
for i in range(n)]
def density(n): return occ(n) / n
occ(4)
runs(7)
occ(7)
density(7)
def occ(n, cache=[0, 1]):
"The expected occupancy for a row of n houses (under misanthrope rules)."
# Store occ(i) in cache[i] for all as-yet-uncomputed values of i up to n:
for i in range(len(cache), n+1):
cache.append(mean(cache[L] + 1 + cache[R]
for (L, R) in runs(i)))
return cache[n]
occ(4) == 2
density(7)
%time occ(2000)
%time occ(2000)
%matplotlib inline
import matplotlib.pyplot as plt
def plot_density(ns):
"Plot density(n) for each n in the list of numbers ns."
plt.xlabel('n houses'); plt.ylabel('density(n)')
plt.plot(ns, [density(n) for n in ns], 's-')
return density(ns[-1])
plot_density(range(1, 100))
plot_density(range(1, 11))
plot_density(range(100, 4000, 50))
def diff(n, m): return density(n) - density(m)
diff(100, 200)
diff(200, 400)
diff(100, 200) / diff(200, 400)
n = 500; diff(n, 2*n) / diff(2*n, 4*n)
n = 1000; diff(n, 2*n) / diff(2*n, 4*n)
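# Hedged aside (not in the original): the exact factor of 2 follows if
# density(n) = A + B/n, since then diff(n, 2n) = B/(2n) and
# diff(2n, 4n) = B/(4n), whose ratio is exactly 2 for every n.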
from scipy.optimize import curve_fit
Ns = list(range(100, 10001, 100))
def f(n, A, B): return A + B / n
((A, B), covariance) = curve_fit(f=f, xdata=Ns, ydata=[density(n) for n in Ns])
covariance
A, B
def estimated_density(n): return A + B / n
max(abs(density(n) - estimated_density(n))
for n in range(200, 4000))
from math import sinh, exp, e
S = sinh(1) / exp(1)
E = 0.5 * (1 - e ** (-2))
assert S == E
S, E, A
import random
def simulate(n):
"Simulate moving in to houses, and return a sorted tuple of occupied houses."
occupied = set()
for house in random.sample(range(n), n):
if (house - 1) not in occupied and (house + 1) not in occupied:
occupied.add(house)
return sorted(occupied)
def simulated_density(n, repeat=10000):
"Estimate density by simulation, repeated `repeat` times."
return mean(len(simulate(n)) / n
for _ in range(repeat))
print(' n simul density estimated')
for n in (25, 50, 100, 200, 400):
print('{:3} {:.3} {:.3} {:.3}'
.format(n, simulated_density(n), density(n), estimated_density(n)))
simulate(7)
from collections import Counter
Counter(tuple(simulate(7)) for _ in range(10000))
def test():
assert occ(0) == 0
assert occ(1) == occ(2) == 1
assert occ(3) == 5/3
assert density(3) == occ(3) / 3
assert density(100) == occ(100) / 100
assert runs(3) == [(0, 1), (0, 0), (1, 0)]
assert runs(7) == [(0, 5), (0, 4), (1, 3), (2, 2), (3, 1), (4, 0), (5, 0)]
for n in (3, 7, 10, 20, 100, 101, 200, 201):
for repeat in range(500):
assert_valid(simulate(n), n)
return 'ok'
def assert_valid(occupied, n):
"""Assert that, in this collection of occupied houses, no house is adjacent to an
occupied house, and every unoccupied position is adjacent to an occupied house."""
occupied = set(occupied) # coerce to set
for house in range(n):
if house in occupied:
assert (house - 1) not in occupied and (house + 1) not in occupied
else:
assert (house - 1) in occupied or (house + 1) in occupied
test()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Puzzle of the Misanthropic Neighbors
Step2: Let's check that occ(4) is 2, as we computed it should be
Step3: And that runs(7) is what we described above
Step4: Let's check on n = 7
Step5: That seems reasonable, but I don't know for sure that it is correct.
Step6: Let's make sure this new version gets the same results as the old version
Step7: Let's make sure the caching makes computation faster the second time
Step8: Plotting density(n)
Step9: There is something funny going on with the first few values of n. Let's separately look at the first few
Step10: And at a wider range
Step11: The density is going down, and the curve is almost but not quite flat.
Step12: And compared that to the difference when you double n again
Step13: Hmm—I noticed that the first difference is just about twice as much as the second. Let's check
Step14: Wow—not only is it close to twice as much, it is exactly twice as much (to the precision of floating point numbers). Let's try other starting values for n
Step15: OK, I'm convinced this is real!
Step16: The curve_fit function returns a sequence of parameter values, and a covariance matrix. The fact that all the numbers in the covariance matrix are really small indicates that the parameters are a really good fit. Here are the parameters, A and B
Step17: We can plug them into a function that estimates the density
Step18: And we can test how close this function is to the true density function
Step19: That says that, for all values of n from 200 to 4,000, density(n) and estimated_density(n) agree at least through the first 15 decimal places!
Step20: So I now have a suspicion that
Step21: Let's see if the simulation returns results that match the actual density function and the estimated_density function
Step22: We got perfect agreement (at least to 3 decimal places), suggesting that either our three implementations are all correct, or we've made mistakes in all three.
Step23: Let's repeat that multiple times, and store the results in a Counter, which tracks how many times it has seen each result
Step25: That says that about 1/3 of the time, things work out so that the 4 even-numbered houses are occupied. But if anybody ever chooses an odd-numbered house, then we are destined to have 3 houses occupied (in one of 6 different ways, of which (1, 3, 5) is the most common, probably because it is the only one that has three chances of getting started with an odd-numbered house).
|
6,225
|
<ASSISTANT_TASK:>
Python Code:
%load_ext watermark
%watermark -u -v -d -p matplotlib,numpy
%matplotlib inline
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib import pyplot as plt
# Generate some 3D sample data
mu_vec1 = np.array([0,0,0]) # mean vector
cov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]]) # covariance matrix
class1_sample = np.random.multivariate_normal(mu_vec1, cov_mat1, 20)
class2_sample = np.random.multivariate_normal(mu_vec1 + 1, cov_mat1, 20)
class3_sample = np.random.multivariate_normal(mu_vec1 + 2, cov_mat1, 20)
# class1_sample.shape -> (20, 3), 20 rows, 3 columns
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(class1_sample[:,0], class1_sample[:,1], class1_sample[:,2],
marker='x', color='blue', s=40, label='class 1')
ax.scatter(class2_sample[:,0], class2_sample[:,1], class2_sample[:,2],
marker='o', color='green', s=40, label='class 2')
ax.scatter(class3_sample[:,0], class3_sample[:,1], class3_sample[:,2],
marker='^', color='red', s=40, label='class 3')
ax.set_xlabel('variable X')
ax.set_ylabel('variable Y')
ax.set_zlabel('variable Z')
plt.title('3D Scatter Plot')
plt.show()
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
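# Hedged note (not in the original): Arrow3D projects its 3-d endpoints into
# 2-d display coordinates at draw time, so the arrows stay attached to the
# data when the axes are rotated.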
# Generate some example data
mu_vec1 = np.array([0,0,0])
cov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class1_sample = np.random.multivariate_normal(mu_vec1, cov_mat1, 20)
mu_vec2 = np.array([1,1,1])
cov_mat2 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class2_sample = np.random.multivariate_normal(mu_vec2, cov_mat2, 20)
# concatenate data for PCA
samples = np.concatenate((class1_sample, class2_sample), axis=0)
# mean values
mean_x = np.mean(samples[:,0])
mean_y = np.mean(samples[:,1])
mean_z = np.mean(samples[:,2])
#eigenvectors and eigenvalues
eig_val, eig_vec = np.linalg.eig(cov_mat1)
################################
#plotting eigenvectors
################################
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.plot(samples[:,0], samples[:,1], samples[:,2], 'o', markersize=10, color='green', alpha=0.2)
ax.plot([mean_x], [mean_y], [mean_z], 'o', markersize=10, color='red', alpha=0.5)
for v in eig_vec.T:
a = Arrow3D([mean_x, mean_x + v[0]], [mean_y, mean_y + v[1]],
[mean_z, mean_z + v[2]], mutation_scale=20, lw=3, arrowstyle="-|>", color="r")  # arrow from the mean along each eigenvector
ax.add_artist(a)
ax.set_xlabel('variable X')
ax.set_ylabel('variable Y')
ax.set_zlabel('variable Z')
plt.title('3D scatter plot with eigenvectors')
plt.show()
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from itertools import product, combinations
fig = plt.figure(figsize=(7,7))
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
# Plot Points
# samples within the cube
X_inside = np.array([[0,0,0],[0.2,0.2,0.2],[0.1, -0.1, -0.3]])
X_outside = np.array([[-1.2,0.3,-0.3],[0.8,-0.82,-0.9],[1, 0.6, -0.7],
[0.8,0.7,0.2],[0.7,-0.8,-0.45],[-0.3, 0.6, 0.9],
[0.7,-0.6,-0.8]])
for row in X_inside:
ax.scatter(row[0], row[1], row[2], color="r", s=50, marker='^')
for row in X_outside:
ax.scatter(row[0], row[1], row[2], color="k", s=50)
# Plot Cube
h = [-0.5, 0.5]
for s, e in combinations(np.array(list(product(h,h,h))), 2):
if np.sum(np.abs(s-e)) == h[1]-h[0]:
ax.plot3D(*zip(s,e), color="g")
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
ax.set_zlim(-1.5, 1.5)
plt.show()
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.mlab import bivariate_normal
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10, 7))
ax = fig.gca(projection='3d')
x = np.linspace(-5, 5, 200)
y = x
X,Y = np.meshgrid(x, y)
Z = bivariate_normal(X, Y)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.cm.coolwarm,
linewidth=0, antialiased=False)
ax.set_zlim(0, 0.2)
ax.zaxis.set_major_locator(plt.LinearLocator(10))
ax.zaxis.set_major_formatter(plt.FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=7, cmap=plt.cm.coolwarm)
plt.show()
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.mlab import bivariate_normal
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10, 7))
ax = fig.gca(projection='3d')
x = np.linspace(-5, 5, 200)
y = x
X,Y = np.meshgrid(x, y)
Z = bivariate_normal(X, Y)
surf = ax.plot_wireframe(X, Y, Z, rstride=4, cstride=4, color='g', alpha=0.7)
ax.set_zlim(0, 0.2)
ax.zaxis.set_major_locator(plt.LinearLocator(10))
ax.zaxis.set_major_formatter(plt.FormatStrFormatter('%.02f'))
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('p(x)')
plt.title('bivariate Gaussian')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font size="1.5em">More info about the %watermark extension</font>
Step2: <br>
Step3: <br>
Step4: <br>
Step5: <br>
Step6: <br>
|
6,226
|
<ASSISTANT_TASK:>
Python Code:
try:
import verta
except ImportError:
!pip install verta
HOST = "app.verta.ai"
PROJECT_NAME = "Spam Detection"
EXPERIMENT_NAME = "tf-idf"
# import os
# os.environ['VERTA_EMAIL'] =
# os.environ['VERTA_DEV_KEY'] =
from __future__ import print_function
import json
import os
import re
import time
import cloudpickle
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report, precision_recall_curve, confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
try:
import wget
except ImportError:
!pip install wget # you may need pip3
import wget
train_data_url = "http://s3.amazonaws.com/verta-starter/spam.csv"
train_data_filename = wget.detect_filename(train_data_url)
if not os.path.isfile(train_data_filename):
wget.download(train_data_url)
raw_data = pd.read_csv(train_data_filename, delimiter=',', encoding='latin-1')
raw_data.head()
# turn spam/ham to 0/1, and remove unnecessary columns
raw_data.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'],axis=1,inplace=True)
raw_data.v1 = LabelEncoder().fit_transform(raw_data.v1)
raw_data.head()
# lemmatize text
total_stopwords = set([word.replace("'",'') for word in stopwords.words('english')])
lemma = WordNetLemmatizer()
def preprocess_text(text):
text = text.lower()
text = text.replace("'",'')
text = re.sub('[^a-zA-Z]',' ',text)
words = text.split()
words = [lemma.lemmatize(word) for word in words if (word not in total_stopwords) and (len(word)>1)] # Remove stop words
text = " ".join(words)
return text
raw_data.v2 = raw_data.v2.apply(preprocess_text)
raw_data.head()
x_train, x_test, y_train, y_test = train_test_split(raw_data.v2, raw_data.v1, test_size=0.15, stratify=raw_data.v1)
from verta import Client
from verta.utils import ModelAPI
client = Client(HOST)
proj = client.set_project(PROJECT_NAME)
expt = client.set_experiment(EXPERIMENT_NAME)
run = client.set_experiment_run()
vectorizer = TfidfVectorizer()
vectorizer.fit(x_train)
x_train_vec = vectorizer.transform(x_train).toarray()
model = linear_model.LogisticRegression()
model.fit(x_train_vec, y_train)
x_test_vec = vectorizer.transform(x_test).toarray()
y_pred = model.predict(x_test_vec)
m_confusion_test = confusion_matrix(y_test, y_pred)
display(pd.DataFrame(data=m_confusion_test,
columns=['Predicted 0', 'Predicted 1'],
index=['Actual 0', 'Actual 1']))
print("This model misclassifies {} genuine SMS as spam"
" and misses only {} SPAM.".format(m_confusion_test[0,1], m_confusion_test[1,0]))
accuracy = accuracy_score(y_test, y_pred)
run.log_metric("accuracy", accuracy)
accuracy
# save and upload weights
model_param = {}
model_param['coef'] = model.coef_.reshape(-1).tolist()
model_param['intercept'] = model.intercept_.tolist()
json.dump(model_param, open("weights.json", "w"))
run.log_artifact("weights", open("weights.json", "rb"))
# serialize and upload vectorizer
run.log_artifact("vectorizer", vectorizer)
class SpamModel():
def __init__(self, artifacts):
from nltk.corpus import stopwords # needs to be re-imported to remove local file link
# get artifact filepaths from `artifacts` mapping
weights_filepath = artifacts['weights']
vectorizer_filepath = artifacts['vectorizer']
# load artifacts
self.weights = json.load(open(weights_filepath, "r"))
self.vectorizer = cloudpickle.load(open(vectorizer_filepath, "rb"))
# reconstitute logistic regression
self.coef_ = np.array(self.weights["coef"])
self.intercept_ = self.weights["intercept"]
# configure text preprocessing
self.total_stopwords = set([word.replace("'",'') for word in stopwords.words('english')])
self.lemma = WordNetLemmatizer()
def preprocess_text(self, text):
text = text.lower()
text = text.replace("'",'')
text = re.sub('[^a-zA-Z]',' ',text)
words = text.split()
words = [self.lemma.lemmatize(word) for word in words if (word not in self.total_stopwords) and (len(word)>1)] # Remove stop words
text = " ".join(words)
return text
def predict(self, data):
predictions = []
for inp in data:
# preprocess input
processed_text = self.preprocess_text(inp)
inp_vec = self.vectorizer.transform([processed_text]).toarray()  # vectorize the preprocessed text
# make prediction
prediction = (np.dot(inp_vec.reshape(-1), self.coef_.reshape(-1)) + self.intercept_)[0]
predictions.append(prediction)
return predictions
artifacts = run.fetch_artifacts(["weights", "vectorizer"])
spam_model = SpamModel(artifacts=artifacts)
spam_model.predict(["FREE FREE FREE"])
run.log_model(
model=SpamModel,
artifacts=['weights', 'vectorizer'],
)
run.log_requirements([
"cloudpickle",
"nltk",
"numpy",
"sklearn",
])
run.log_setup_script("""
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
""")
run
from verta.deployment import DeployedModel
deployed_model = DeployedModel(HOST, run.id)
for text in x_test:
print(deployed_model.predict([text]))
time.sleep(.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This example features
Step2: Imports
Step3: Run Workflow
Step4: Instantiate Client
Step5: Fit Model
Step6: Define Model Class
Step7: Earlier we logged artifacts with the keys "weights" and "vectorizer".
Step8: Log Model
Step9: We also have to make sure we provide every package involved in the model.
Step11: And we need to ensure that the appropriate NLTK packages are available during deployment.
Step12: Make Live Predictions
Step13: Load Deployed Model
Step14: Query Deployed Model
|
6,227
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
list1 = [1, 2, 3, 4, 5] # Define a list
array1 = np.array(list1) # Pass the list to np.array()
type(array1) # Check the object's type
print("array1 = ", array1) # Check the content of the array (printing in Python 3)
print ("array1 = %s" % np.array_str(array1)) # Check the content of the array (printing in Python 2)
list2 = [[1,2,3,4,5], [6,7,8,9,10]]
array2 = np.array(list2)
print("array2 = ", array2) # Python 3
print("array2 = %s" % np.array_str(array2)) # Python 2 and 3
array2.shape
print("array2 has", array2.size ,"items of type", array2.dtype) # Python 3
print ("array2 has %d items of type %s" % (array2.size, array2.dtype)) # Python 2 and 3
np.identity(n = 3) # n is the size of the square 2-d array
np.eye(3, # Number of rows
5, # Number of columns
1) # Index of the diagonal (main diagonal, 0, is the default)
# np.ones() to create an array filled with ones:
np.ones(shape= [2,3])
# np.zeros() to create an array filled with zeros:
np.zeros(shape= [3,4])
d_array = np.array([1,2,3,4,5,6])
d_array[2] # Get the item at index 2
d_array[4:] # Get a slice from index 4 to the end
d_array[::-1] # shortcut to reverse the array
# Create a new two dimensional array
dd_array = np.array([d_array, d_array + 10, d_array + 100])
print(dd_array)
# Get the element on row 2, and column 3
dd_array[2, 3]
# Slice elements starting at row 1, and column 3
dd_array[1:, 4:]
#Reverse the array in both dimensions (rotation)
dd_array[::-1, ::-1]
np.reshape(dd_array, # Array to reshape
newshape=(2,9)) # Dimensions of the new array
np.ravel(dd_array, # Array to reshape
order='C') # Unravel by rows
np.ravel(dd_array,
order='F') # Unravel by columns
dd_array.flatten() #flatten a multi-dimensional array into 1 dimension and return a copy of the result
dd_array.T #get the transpose
np.flipud(dd_array) #Flip an array vertically
np.fliplr(dd_array) #Flip an array horizontally
np.rot90(dd_array, # Rotate the array 90 degrees counter-clockwise
k=1) # Number of 90 degree rotations
np.roll(dd_array, # Shift elements in an array along a given dimension
shift = 2, # Shift elements 2 positions
axis = 1) # In each row
np.roll(dd_array, #Leave the axis argument empty to shift across all dimensions
shift = 2)
#Join arrays along an axis
array_to_join = np.array([[10,20,30],[40,50,60],[70,80,90]])
np.concatenate((dd_array,array_to_join), # Arrays to join
axis=1) # Axis to join upon
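# Hedged addition (not in the original): with axis=0 the arrays are stacked
# vertically; the column counts must then match.
np.concatenate((dd_array, dd_array), axis=0)  # result has shape (6, 6)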
dd_array + 10 # Add 10 to each element
dd_array - 10 # Subtract 10 from each element
dd_array * 2 # Multiply each element by 2
dd_array ** 2 # Square each element
array3 = np.array([[1,2],[3,4]])
array3 + array3
array3 - array3
array3 * array3
np.mean(dd_array) # The mean of all the elements in an array
np.std(dd_array) #Get the standard deviation all the elements in an array
np.sum(dd_array,
axis=1) # Get the row sums for the elements of an array
np.sum(dd_array,
axis=0) # Get the column sums
np.sqrt(dd_array) # Take the square root of each element in the array
np.dot(dd_array[0,0:], # Slice row 0
dd_array[1,0:]) # Slice row 1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can create an ndarray by passing a list to the np.array() function
Step2: To create an array with more than one dimension, we can pass a nested list to the np.array() function
Step3: The parameters of an ndarray include the number of dimensions it has, the size of each dimension and the type of data it contains. We can check the dimensions of an ndarray with the shape attribute
Step4: The output above shows that array2 is a 2-dimensional array with 2 rows and 5 columns. We can check the size (total number of items) of an array with the size attribute and the type of the data it contains with the dtype attribute
Step5: Numpy has several functions for creating arrays, such as
Step6: np.eye() to create a 2d array with 1's across a specified diagonal and 0's everywhere else
Step7: Array Indexing and Slicing
Step8: If an ndarray has more than one dimension, separate indexes for each dimension with a comma
Step9: Reshaping Arrays
Step10: Unravel a multi-dimensional into 1 dimension with np.ravel()
Step11: Array Math Operations
Step12: One can also use the basic math operators on two arrays with the same shape. The basic math operators function in an element-wise fashion, returning an array with the same shape as the original.
Step13: Numpy also provides math functions for ndarrays such as
Step14: The np.dot() function returns the dot product of two arrays.
|
6,228
|
<ASSISTANT_TASK:>
Python Code:
dot = Digraph(comment='Design of Experiments')
dot.body.extend(['rankdir=LR', 'size="10,10"'])
dot.node_attr.update(shape='rectangle', style='filled', fontsize='20', fontname="helvetica")
dot.node('X', 'Controllable Factors', color='mediumseagreen', width='3')
dot.node('Z', 'Noise Factors', color='indianred2', width='3')
dot.node('P', 'Process', color='lightblue', height='1.25', width='3')
dot.node('Y', 'Responses', color='lightblue')
dot.edges(['XP', 'ZP', 'PY'] * 3)
dot
# set some variables related to the coffee data set
actual_lows = { 'amount' : 2.5, 'grind_size' : 8, 'brew_time': 3.5, 'grind_type': 'burr', 'beans': 'light' }
actual_highs = { 'amount' : 4, 'grind_size' : 10, 'brew_time': 4.5, 'grind_type': 'blade', 'beans': 'dark' }
units = { 'amount' : 'oz', 'grind_size' : 'mm', 'brew_time': 'm', 'grind_type': '', 'beans': '' }
points = [
[-1, -1],
[-1, 1],
[1, -1],
[-1, -1],
[-1, 1],
[1, -1],
]
df = pd.DataFrame(points, columns=['grind_size', 'amount'])
fg = sns.lmplot('amount', 'grind_size', data=df, fit_reg=False)
p = patches.Polygon(points, color="navy", alpha=0.3, lw=2)
ax = fg.axes[0, 0]
ax.add_patch(p)
ax.set_xticks([-1, 1])
ax.set_xticklabels(get_tick_labels('amount', actual_lows, actual_highs, units))
ax.set_yticks([-1, 1])
ax.set_yticklabels(get_tick_labels('grind_size', actual_lows, actual_highs, units))
p = patches.Polygon([[-1, 1], [1, 1], [1, -1]], color="firebrick", alpha=0.3, lw=2)
p = ax.add_patch(p)
cube_design = dexpy.factorial.build_factorial(3, 8)
points = np.array(cube_design)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', axisbg='w')
ax.view_init(30, -60) # rotate plot
X, Y = np.meshgrid([-1,1], [-1,1])
cube_alpha = 0.1
ax.plot_surface(X, Y, 1, alpha=cube_alpha, color="r")
ax.plot_surface(X, Y, -1, alpha=cube_alpha)
ax.plot_surface(X, -1, Y, alpha=cube_alpha)
ax.plot_surface(X, 1, Y, alpha=cube_alpha, color="r")
ax.plot_surface(1, X, Y, alpha=cube_alpha, color="r")
ax.plot_surface(-1, X, Y, alpha=cube_alpha)
ax.scatter3D(points[:, 0], points[:, 1], points[:, 2],
c=["b", "b", "b", "r", "b", "r", "r", "r"])
ax.set_xticks([-1, 1])
ax.set_xticklabels(get_tick_labels('grind_size', actual_lows, actual_highs, units))
ax.set_yticks([-1, 1])
ax.set_yticklabels(get_tick_labels('amount', actual_lows, actual_highs, units))
ax.set_zticks([-1, 1])
ax.set_zticklabels(get_tick_labels('beans', actual_lows, actual_highs, units))
plt.show()
df = dexpy.factorial.build_factorial(2, 4)
df.columns = ['amount', 'grind_size']
fg = sns.lmplot('amount', 'grind_size', data=df, fit_reg=False)
ax = fg.axes[0, 0]
ax.set_xticks([-1, 1])
ax.set_xticklabels(get_tick_labels('amount', actual_lows, actual_highs, units))
ax.set_yticks([-1, 1])
ax.set_yticklabels(get_tick_labels('grind_size', actual_lows, actual_highs, units))
p = ax.add_patch(patches.Rectangle((-1, -1), 2, 2, color="navy", alpha=0.3, lw=2))
cube_design = dexpy.factorial.build_factorial(3, 8)
points = np.array(cube_design)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', axisbg='w')
ax.view_init(30, -60) # rotate plot
X, Y = np.meshgrid([-1,1], [-1,1])
cube_alpha = 0.1
ax.plot_surface(X, Y, 1, alpha=cube_alpha)
ax.plot_surface(X, Y, -1, alpha=cube_alpha)
ax.plot_surface(X, -1, Y, alpha=cube_alpha)
ax.plot_surface(X, 1, Y, alpha=cube_alpha)
ax.plot_surface(1, X, Y, alpha=cube_alpha)
ax.plot_surface(-1, X, Y, alpha=cube_alpha)
ax.scatter3D(points[:, 0], points[:, 1], points[:, 2], c="b")
ax.set_xticks([-1, 1])
ax.set_xticklabels(["8mm", "10mm"])
ax.set_yticks([-1, 1])
ax.set_yticklabels(["2.5oz", "4oz"])
ax.set_zticks([-1, 1])
ax.set_zticklabels(["light", "dark"])
plt.show()
runs = 50
delta = 0.5
sigma = 1.5
alpha = 0.05
one_factor = pd.DataFrame([ -1, 1 ] * runs, columns=['beans'])
one_factor_power = dexpy.power.f_power('beans', one_factor, delta/sigma, alpha)
display(Markdown('''
# Power Example
{} pots of coffee are tested with light beans, then {} pots with dark beans.
There is a variance of {} taste rating from pot to pot. If we expect a {} change
in the taste rating when going from light to dark, what is the likelihood we would detect it?
(Answer: **{:.2f}%**)
Note: this assumes that we reject H<sub>0</sub> at p <= {}
'''.format(int(runs / 2), int(runs / 2), sigma, delta, one_factor_power[1]*100, alpha)
))
def plot_shift(runs, delta, sigma, annotate=False):
"""Plots two sets of random normal data, one shifted up delta units."""
mean = 5
low = sigma*np.random.randn(int(runs/2),1)+mean
high = sigma*np.random.randn(int(runs/2),1)+mean+delta
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylabel("taste")
ax.set_xlabel("runs")
ax.set_ylim([0, 11])
plt.plot(np.concatenate([low, high]))
plt.plot([0, (runs/2)], [low.mean()] * 2, color='firebrick', lw=2)
plt.plot([(runs/2), runs-1], [high.mean()] * 2, color='g', lw=2)
p_value = scipy.stats.f_oneway(low, high).pvalue[0]
if annotate:
plt.annotate("p: {:.5f}".format(p_value),
xy=(runs / 2, (low.mean() + high.mean()) / 2), xycoords='data',
xytext=(.8, .9), textcoords='axes fraction',
bbox=dict(boxstyle="round4", fc="w"),
arrowprops=dict(arrowstyle='-[', linewidth=2, color='black', connectionstyle="angle3")
)
plt.show()
return [low, high]
low, high = plot_shift(runs, delta, sigma)
increased_delta = delta*4
increased_delta_power = dexpy.power.f_power('beans', one_factor, increased_delta/sigma, alpha)
display(Markdown('''
# Power Example - Increase Delta
What if we don't care about a taste increase of 0.5? That's not that much better
than the current coffee, after all. Instead, if we say we only care about a change
in rating above {}, what is the likelihood we would detect such a change?
(Answer: **{:.5f}%**)
'''.format(increased_delta, increased_delta_power[1]*100)
))
_ = plot_shift(runs, increased_delta, sigma)
decreased_sigma = sigma*0.5
decreased_sigma_power = dexpy.power.f_power('beans', one_factor, delta/decreased_sigma, alpha)
display(Markdown('''
# Power Example - Decrease Noise
Instead of lowering our standards for our noisy taste ratings, instead
we could bring in expert testers who have a much more accurate palate.
If we assume a decrease in noise from {} to {}, then we can detect a
change in rating of {} with {:.2f}% probability.
'''.format(sigma, decreased_sigma, delta, decreased_sigma_power[1]*100)
))
_ = plot_shift(runs, delta, sigma*0.1)
increased_runs = runs * 4
one_factor = pd.DataFrame([ -1, 1 ] * increased_runs, columns=['beans'])
increased_runs_power = dexpy.power.f_power('beans', one_factor, delta/sigma, alpha)
display(Markdown('''
# Power Example - Increase Runs
If expert testers are too expensive, and we are unwilling to compromise
our standards, then the only remaining option is to create a more powerful
design. In this toy example, there isn't much we can do, since it's
just one factor. Increasing the runs from {} to {} gives a power of
{:.2f}%. This may be a more acceptable success rate than the original power
of {:.2f}%, however... that is a lot of coffee to drink.
For more complicated designs changing the structure of the design
can also increase power.
'''.format(runs, increased_runs, increased_runs_power[1]*100, one_factor_power[1]*100)
))
_ = plot_shift(increased_runs, delta, sigma)
help(dexpy.power.f_power)
base_point = [-1, -1, -1, -1, -1]
ofat_points = [base_point]
for i in range(0, 5):
new_point = base_point[:]
new_point[i] = 1
ofat_points.append(new_point)
sn = 2.0
alpha = 0.05
ofat_df = pd.DataFrame(ofat_points*5, columns=['amount', 'grind_size', 'brew_time', 'grind_type', 'beans'])
model = ' + '.join(ofat_df.columns)
ofat_power = dexpy.power.f_power('+'.join(ofat_df.columns), ofat_df, sn, alpha)
ofat_power.pop(0) # remove intercept
ofat_power = ['{0:.2f}%'.format(i*100) for i in ofat_power] # convert to %
ofat_power = pd.DataFrame(ofat_power, columns=['Power'], index=ofat_df.columns)
display(Markdown('''
# Calculating Power with dexpy: OFAT
* {} total runs, with a signal to noise ratio of 2.
* Model: `{}`
'''.format(len(ofat_df), model)))
display(PrettyPandas(ofat_power))
full_design = dexpy.factorial.build_factorial(5, 2**5)
full_design.columns = ['amount', 'grind_size', 'brew_time', 'grind_type', 'beans']
factorial_power = dexpy.power.f_power(model, full_design, sn, alpha)
factorial_power.pop(0)
factorial_power = ['{0:.2f}%'.format(i*100) for i in factorial_power] # convert to %
factorial_power = pd.DataFrame(factorial_power, columns=['Power'], index=full_design.columns)
display(Markdown('''
# Calculating Power with dexpy: Factorial
* {} total runs, with a signal to noise ratio of 2.
* Model (`patsy` formula): `{}`
'''.format(len(full_design), model)))
display(PrettyPandas(factorial_power))
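# Hedged note (not in the original): with the same signal-to-noise ratio, the
# factorial design reaches far higher power per run than OFAT above, because
# every run contributes information about every factor simultaneously.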
help(dexpy.factorial.build_factorial)
coffee_design = dexpy.factorial.build_factorial(5, 2**(5-1))
coffee_design.columns = ['amount', 'grind_size', 'brew_time', 'grind_type', 'beans']
center_points = [
[0, 0, 0, -1, -1],
[0, 0, 0, -1, 1],
[0, 0, 0, 1, -1],
[0, 0, 0, 1, 1]
]
coffee_design = coffee_design.append(pd.DataFrame(center_points * 2, columns=coffee_design.columns))
coffee_design.index = np.arange(0, len(coffee_design))
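# Hedged note (not in the original): replicated center points add pure-error
# degrees of freedom and a check for curvature without enlarging the
# factorial portion of the design.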
actual_design = coded_to_actual(coffee_design, actual_lows, actual_highs)
display(Markdown("## 2<sup>(5-1)</sup> Factorial Design"))
display(PrettyPandas(actual_design))
model = ' + '.join(coffee_design.columns)
factorial_power = dexpy.power.f_power(model, coffee_design, sn, alpha)
factorial_power.pop(0)
factorial_power = ['{0:.2f}%'.format(i*100) for i in factorial_power] # convert to %
factorial_power = pd.DataFrame(factorial_power, columns=['Power'], index=coffee_design.columns)
display(Markdown('''
## 2<sup>(5-1)</sup> Factorial Power
* Power for {} total runs
* Signal to noise ratio of 2
* Model: `{}`
'''.format(len(coffee_design), model)))
display(PrettyPandas(factorial_power))
twofi_model = "(" + '+'.join(coffee_design.columns) + ")**2"
desc = patsy.ModelDesc.from_formula(twofi_model)
factorial_power = dexpy.power.f_power(twofi_model, coffee_design, sn, alpha)
factorial_power.pop(0)
factorial_power = ['{0:.2f}%'.format(i*100) for i in factorial_power] # convert to %
factorial_power = pd.DataFrame(factorial_power, columns=['Power'], index=desc.describe().strip("~ ").split(" + "))
display(Markdown('''
## 2<sup>(5-1)</sup> Factorial Power
* Power for {} total runs
* Signal to noise ratio of 2
* Model: `{}`
'''.format(len(coffee_design), twofi_model)))
display(Markdown("## Power for {} total runs, with a signal to noise ratio of 2.".format(len(coffee_design))))
display(PrettyPandas(factorial_power))
display(Markdown('''
# Aliasing
When you don't run all combinations of inputs, you lose the ability to estimate terms
For example, if you have three inputs the full model matrix looks like this:
'''))
three_factor_design = dexpy.factorial.build_factorial(3, 8)
X = patsy.dmatrix("(" + " + ".join(three_factor_design.columns) + ")**3", three_factor_design, return_type='dataframe')
display(PrettyPandas(X))
display(Markdown('''
## Aliasing
If we remove half the runs, so that the design is 2<sup>3-1</sup>:
'''))
X.loc[[0,3,5,6]] = ''
display(PrettyPandas(X))
display(Markdown('''
You can see that A*B*C never changes. In addition, A = BC and B = AC.
When a term is a linear combination of another term that is called an **alias**. Aliased terms are unestimable.
'''))
help(dexpy.alias.alias_list)
display(Markdown('''
## Calculating Aliases in dexpy
Here is what that alias list looks like for our Coffee Experiment:
'''))
full_model = "(" + '+'.join(coffee_design.columns) + ")**3"
aliases, _ = dexpy.alias.alias_list(full_model, coffee_design)
for a in aliases:
print(a)
display(Markdown('''
As you can see, we lose the ability to estimate some three factor interactions separately from the two factor interactions.
This is not a cause for great concern as the three factor interactions are rare in practice.
'''))
# enter response data here
coffee_design['taste_rating'] = [
4.53, 1.6336, 1.363, 8.7, 1.679, 2.895, 7.341, 3.642, # A low
6.974, 3.398, 3.913, 9.04, 5.092, 3.718, 8.227, 6.992, # A high
4.419, 6.806, 3.512, 5.36, 4.865, 6.342, 4.38, 5.942 # center points
]
lm = statsmodels.formula.api.ols("taste_rating ~" + twofi_model, data=coffee_design).fit()
print(lm.summary2())
reduced_model = "amount + grind_size + brew_time + grind_type + beans + amount:beans + grind_size:brew_time + grind_size:grind_type"
lm = statsmodels.formula.api.ols("taste_rating ~" + reduced_model, data=coffee_design).fit()
print(lm.summary2())
display(Markdown('''
If we take the experiment data from the design and use our new model to fit that data, then plot it against
the observed values we can get an idea for how well our model predicts. Points above the 45 degree line are
overpredicting for that combination of inputs. Points below the line predict a lower taste rating than
we actually measured during the experiment.'''))
actual_predicted = pd.DataFrame({ 'actual': coffee_design['taste_rating'],
'predicted': lm.fittedvalues
}, index=np.arange(len(coffee_design['taste_rating'])))
fg = sns.FacetGrid(actual_predicted, size=5)
fg.map(plt.scatter, 'actual', 'predicted')
ax = fg.axes[0, 0]
ax.plot([1, 10], [1, 10], color='g', lw=2)
ax.set_xticks(np.arange(1, 11))
ax.set_xlim([0, 11])
ax.set_yticks(np.arange(1, 11))
ax.set_title('Actual vs Predicted')
_ = ax.set_ylim([0, 11])
display(Markdown('''
Plotting the prediction for two factors at once shows how they interact with each other.
In this graph you can see that at the low brew time the larger grind size results in
a poor taste rating, likely because the coffee is too weak.'''))
pred_points = pd.DataFrame(1, columns = coffee_design.columns, index=np.arange(4))
pred_points.loc[1,'grind_size'] = -1
pred_points.loc[3,'grind_size'] = -1
pred_points.loc[2,'brew_time'] = -1
pred_points.loc[3,'brew_time'] = -1
pred_points['taste_rating'] = lm.predict(pred_points)
pred_points = coded_to_actual(pred_points, actual_lows, actual_highs)
fg = sns.factorplot('grind_size', 'taste_rating', hue='brew_time', kind='point', data=pred_points)
ax = fg.axes[0, 0]
ax.set_xticklabels(get_tick_labels('grind_size', actual_lows, actual_highs, units))
_ = ax.set_title('Grind Size/Brew Time Interaction')
display(Markdown('''
This graph contains the prediction with the highest taste rating, 7.72.
However, if you look at the dark bean line there is a point where we can get
a rating of 6.93 with 2.5oz of grounds.
'''))
pred_points = pd.DataFrame(1, columns = coffee_design.columns, index=np.arange(4))
pred_points.loc[1,'amount'] = -1
pred_points.loc[3,'amount'] = -1
pred_points.loc[2,'beans'] = -1
pred_points.loc[3,'beans'] = -1
pred_points['taste_rating'] = lm.predict(pred_points)
pred_points = coded_to_actual(pred_points, actual_lows, actual_highs)
fg = sns.factorplot('amount', 'taste_rating', hue='beans', kind='point', palette={'dark': 'maroon', 'light': 'peru'}, data=pred_points)
ax = fg.axes[0, 0]
ax.set_xticklabels(get_tick_labels('amount', actual_lows, actual_highs, units))
ax.set_title('Amount/Beans Interaction')
plt.show()
display(PrettyPandas(pred_points))
display(Markdown('''That savings of 1.5oz per pot would create a nice surplus in the coffee budget at the end of the year.'''))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What Is It For?
Step3: Factorial Design
Step5: Statistical Power
Step6: Calculating Power with dexpy
Step7: Fractional Factorials
Step8: Calculating Aliases in dexpy
Step9: Analysis
Step10: Visualization
|
6,229
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as pl
pl.style.use('ggplot')
import numpy as np
from scipy.stats import gamma
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, RBF
from revrand import StandardLinearModel, GeneralizedLinearModel, likelihoods, Parameter, Positive
from revrand.metrics import msll, smse
from revrand.utils.datasets import gen_gausprocess_se
from revrand import basis_functions as bs
from revrand.optimize import AdaDelta, Adam
N = 150 # Number of training points
Ns = 250 # Number of test points
lenscale_true = 1.2
noise_true = 0.1
Xtrain, ytrain, Xtest, ftest = \
gen_gausprocess_se(N, Ns, lenscale=lenscale_true, noise=noise_true)
ytest = ftest + np.random.randn(Ns) * noise_true
# Common settings
nbases = 20 # Number of unique random bases to use for approximating a kernel
lenscale = gamma(a=1, scale=1) # Initial value for the lengthscale
var = gamma(a=0.1, scale=2) # Initial value for target noise
reg = gamma(a=1, scale=1) # Initial weight prior
# GLM specific settings
maxiter = 10000
batch_size = 10
updater = Adam()
# Setup random basis functions
base = bs.RandomRBF(Xdim=1,
nbases=nbases,
lenscale=Parameter(lenscale, Positive()),
regularizer=Parameter(reg, Positive())
)
# SLM
slm = StandardLinearModel(base, var=Parameter(var, Positive()),)
slm.fit(Xtrain, ytrain)
# GLM
llhood = likelihoods.Gaussian(var=Parameter(var, Positive()))
glm = GeneralizedLinearModel(llhood,
base,
batch_size=batch_size,
maxiter=maxiter,
updater=updater
)
glm.fit(Xtrain, ytrain)
# GP
kern = WhiteKernel(noise_level=np.sqrt(var.mean())) + 1**2 * RBF(length_scale=lenscale.mean())
gp = GaussianProcessRegressor(kernel=kern)
gp.fit(Xtrain, ytrain)
# SLM
Ey_e, Vy_e = slm.predict_moments(Xtest)
Sy_e = np.sqrt(Vy_e)
# GLM
Ey_g, Vf_g = glm.predict_moments(Xtest)
Vy_g = Vf_g + glm.like_hypers_
Sy_g = np.sqrt(Vy_g)
# GP
Ey_gp, Sy_gp = gp.predict(Xtest, return_std=True)
Vy_gp = Sy_gp**2
LL_s = msll(ytest, Ey_e, Vy_e, ytrain)
LL_gp = msll(ytest, Ey_gp, Vy_gp, ytrain)
LL_g = msll(ytest, Ey_g, Vy_g, ytrain)
smse_s = smse(ytest, Ey_e)
smse_gp = smse(ytest, Ey_gp)
smse_glm = smse(ytest, Ey_g)
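# Hedged note (not in the original): smse is the mean squared error
# standardised by the test-target variance (lower is better; 1.0 is as good
# as predicting the training mean), and msll is the mean standardised log
# loss (more negative is better), which also penalises badly calibrated
# predictive variances.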
print("SLM: msll = {}, smse = {}, noise: {}, hypers: {}"
.format(LL_s, smse_s, np.sqrt(slm.var_), slm.hypers_))
print("GLM: msll = {}, smse = {}, noise: {}, hypers: {}"
.format(LL_g, smse_glm, np.sqrt(glm.like_hypers_),
glm.basis_hypers_))
print("GP: msll = {}, smse = {}, noise: {}, hypers: {}"
.format(LL_gp, smse_gp, gp.kernel_.k1.noise_level,
gp.kernel_.k2.k2.length_scale))
Xpl_t = Xtrain.flatten()
Xpl_s = Xtest.flatten()
# Training/Truth
pl.figure(figsize=(15, 10))
pl.plot(Xpl_t, ytrain, 'k.', label='Training')
pl.plot(Xpl_s, ftest, 'k-', label='Truth')
# ELBO Regressor
pl.plot(Xpl_s, Ey_e, 'g-', label='Bayesian linear regression')
pl.fill_between(Xpl_s, Ey_e - 2 * Sy_e, Ey_e + 2 * Sy_e, facecolor='none',
edgecolor='g', linestyle='--', label=None)
# GP
pl.plot(Xpl_s, Ey_gp, 'b-', label='GP')
pl.fill_between(Xpl_s, Ey_gp - 2 * Sy_gp, Ey_gp + 2 * Sy_gp,
facecolor='none', edgecolor='b', linestyle='--',
label=None)
# GLM Regressor
pl.plot(Xpl_s, Ey_g, 'm-', label='GLM')
pl.fill_between(Xpl_s, Ey_g - 2 * Sy_g, Ey_g + 2 * Sy_g, facecolor='none',
edgecolor='m', linestyle='--', label=None)
pl.legend()
pl.grid(True)
pl.ylabel('y')
pl.xlabel('x')
pl.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dataset settings and creation
Step2: Algorithm Settings
Step3: Parameter learning
Step4: Model Querying
Step5: Score the models
Step6: Plot predictions
|
6,230
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from ttim import *
import pandas as pd
H = 7 #aquifer thickness
zt = -18 #top boundary of aquifer
zb = zt - H #bottom boundary of aquifer
Q = 788 #constant discharge
# unknown parameters: kaq, Saq
ml = ModelMaq(kaq=60, z=[zt, zb], Saq=1e-4, tmin=1e-5, tmax=1)
w = Well(ml, xw=0, yw=0, rw=0.2, tsandQ=[(0, Q)], layers=0)
ml.solve(silent=True)
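# Hedged note (not in the original): this is the classic confined-aquifer
# (Theis) setup; the kaq and Saq values above are only starting guesses that
# Calibrate adjusts below.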
#time and drawdown of piezometer 30m away from pumping well
data1 = np.loadtxt('data/piezometer_h30.txt', skiprows = 1)
t1 = data1[:, 0] / 60 / 24 #convert min to days
h1 = data1[:, 1]
r1 = 30
#time and drawdown of piezometer 90m away from pumping well
data2 = np.loadtxt('data/piezometer_h90.txt', skiprows = 1)
t2 = data2[:, 0] / 60 / 24 #convert min to days
h2 = data2[:, 1]
r2 = 90
ca1 = Calibrate(ml)
ca1.set_parameter(name='kaq0', initial=10)
ca1.set_parameter(name='Saq0', initial=1e-4)
ca1.series(name='obs1', x=r1, y=0, t=t1, h=h1, layer=0)
ca1.fit(report=True)
display(ca1.parameters)
print('rmse:', ca1.rmse())
hm1 = ml.head(r1, 0, t1)
plt.figure(figsize=(8, 5))
plt.semilogx(t1, h1, '.', label='obs at 30 m')
plt.semilogx(t1, hm1[0], label='ttim at 30 m')
plt.xlabel('time(d)')
plt.ylabel('drawdown(m)')
plt.title('ttim analysis for Oude Korendijk')
plt.legend();
ca2 = Calibrate(ml)
ca2.set_parameter(name='kaq0', initial=10)
ca2.set_parameter(name='Saq0', initial=1e-4)
ca2.series(name='obs2', x=r2, y=0, t=t2, h=h2, layer=0)
ca2.fit(report=True)
display(ca2.parameters)
print('rmse:', ca2.rmse())
hm2 = ml.head(r2, 0, t2)
plt.figure(figsize=(8, 5))
plt.semilogx(t2, h2, '.', label='obs at 90 m')
plt.semilogx(t2, hm2[0], label='ttim at 90 m')
plt.xlabel('time(d)')
plt.ylabel('drawdown(m)')
plt.title('ttim analysis for Oude Korendijk')
plt.legend();
ca = Calibrate(ml)
ca.set_parameter(name='kaq0', initial=10)
ca.set_parameter(name='Saq0', initial=1e-4)
ca.series(name='obs1', x=r1, y=0, t=t1, h=h1, layer=0)
ca.series(name='obs2', x=r2, y=0, t=t2, h=h2, layer=0)
ca.fit(report=True)
display(ca.parameters)
print('rmse:', ca.rmse())
hs1 = ml.head(r1, 0, t1)
hs2 = ml.head(r2, 0 ,t2)
plt.figure(figsize = (8, 5))
plt.semilogx(t1, h1, '.', label='obs at 30m')
plt.semilogx(t1, hs1[0], label='ttim at 30 m')
plt.semilogx(t2, h2, '.', label='obs at 90m')
plt.semilogx(t2, hs2[0], label = 'ttim at 90m')
plt.xlabel('time(d)')
plt.ylabel('drawdown(m)')
plt.title('ttim analysis for Oude Korendijk')
plt.legend();
#unknown parameters: kaq, Saq and rc
ml1 = ModelMaq(kaq=60, z=[zt, zb], Saq=1e-4, tmin=1e-5, tmax=1)
w1 = Well(ml1, xw=0, yw=0, rw=0.2, rc=0.2, tsandQ=[(0, Q)], layers=0)
ml1.solve(silent=True)
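# Hedged note (not in the original): rc is the radius of the well casing; a
# nonzero rc adds wellbore storage, which mainly affects early-time drawdown.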
ca3 = Calibrate(ml1)
ca3.set_parameter(name='kaq0', initial=10)
ca3.set_parameter(name='Saq0', initial=1e-4)
ca3.set_parameter_by_reference(name='rc', parameter=w1.rc[0:], initial=0.2, pmin=0.01)
ca3.series(name='obs1', x=r1, y=0, t=t1, h=h1, layer=0)
ca3.fit(report=True)
display(ca3.parameters)
print('rmse:', ca3.rmse())
hm3 = ml1.head(r1, 0, t1)
plt.figure(figsize=(8, 5))
plt.semilogx(t1, h1, '.', label='obs at 30 m')
plt.semilogx(t1, hm3[0], label='ttim at 30 m')
plt.xlabel('time(d)')
plt.ylabel('drawdown(m)')
plt.title('ttim analysis for Oude Korendijk')
plt.legend();
ca4 = Calibrate(ml1)
ca4.set_parameter(name='kaq0', initial=10)
ca4.set_parameter(name='Saq0', initial=1e-4)
ca4.set_parameter_by_reference(name='rc', parameter=w1.rc[0:], initial=0.2, pmin=0.01)
ca4.series(name='obs2', x=r2, y=0, t=t2, h=h2, layer=0)
ca4.fit(report=True)
display(ca4.parameters)
print('rmse:', ca4.rmse())
hm4 = ml1.head(r2, 0, t2)
plt.figure(figsize=(8, 5))
plt.semilogx(t2, h2, '.', label='obs at 90 m')
plt.semilogx(t2, hm4[0], label='ttim at 90 m')
plt.xlabel('time(d)')
plt.ylabel('drawdown(m)')
plt.title('ttim analysis for Oude Korendijk')
plt.legend();
ca0 = Calibrate(ml1)
ca0.set_parameter(name='kaq0', initial=10)
ca0.set_parameter(name='Saq0', initial=1e-4)
ca0.set_parameter_by_reference(name='rc', parameter=w1.rc[0:], initial=0.2, pmin=0.01)
ca0.series(name='obs1', x=r1, y=0, t=t1, h=h1, layer=0)
ca0.series(name='obs2', x=r2, y=0, t=t2, h=h2, layer=0)
ca0.fit(report=True)
display(ca0.parameters)
print('rmse:', ca0.rmse())
hs1 = ml1.head(r1, 0, t1)
hs2 = ml1.head(r2, 0 ,t2)
plt.figure(figsize = (8, 5))
plt.semilogx(t1, h1, '.', label='obs at 30m')
plt.semilogx(t1, hs1[0], label='ttim at 30 m')
plt.semilogx(t2, h2, '.', label='obs at 90m')
plt.semilogx(t2, hs2[0], label = 'ttim at 90m')
plt.xlabel('time(d)')
plt.ylabel('drawdown(m)')
plt.title('ttim analysis for Oude Korendijk')
plt.legend();
t0 = pd.DataFrame(columns=['obs 30 m', 'obs 90 m', 'obs simultaneously'], index=['without rc', 'with rc'])
t0.loc['without rc', 'obs 30 m'] = ca1.rmse()
t0.loc['without rc', 'obs 90 m'] = ca2.rmse()
t0.loc['without rc', 'obs simultaneously'] = ca.rmse()
t0.loc['with rc', 'obs 30 m'] = ca3.rmse()
t0.loc['with rc', 'obs 90 m'] = ca4.rmse()
t0.loc['with rc', 'obs simultaneously'] = ca0.rmse()
print('RMSE of two conceptual models:')
t0
t = pd.DataFrame(columns=['k [m/d]', 'Ss [1/m]', 'RMSE'], \
index=['K&dR', 'ttim', 'AQTESOLV', 'MLU'])
t.loc['ttim'] = np.append(ca.parameters['optimal'].values, ca.rmse())
t.loc['AQTESOLV'] = [66.086, 2.541e-05, 0.05006]
t.loc['MLU'] = [66.850, 2.400e-05, 0.05083]
t.loc['K&dR'] = [55.71429, 1.7E-4, '-']
t
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set basic parameters for the model
Step2: Create conceptual model
Step3: Load data of two observation wells
Step4: Calibrate using only the data from observation well 1
Step5: Calibrate using only the data from observation well 2
Step6: Calibrate model with two datasets simultaneously
Step7: Investigate whether adding well bore storage improves the fit
Step8: Calibrate using only the data from observation well 1
Step9: Calibrate using only the data from observation well 2
Step10: Calibrate model with two datasets simultaneously
Step11: Summary of values presented in Kruseman and de Ridder (1970)
Step12: Adding wellbore storage improves the fit when the drawdown data of the two observation wells are used separately. However, when the model is calibrated with both datasets simultaneously, rc approaches its minimum value, so adding rc does not improve the performance much.
|
6,231
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'miroc', 'miroc-es2h', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
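# Illustrative example only: enter one of the provision codes listed above
# verbatim, e.g. DOC.set_value("Y")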
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
6,232
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet # IGNORE_COPYRIGHT: cleared by OSS licensing
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
BATCH_SIZE = 32
IMG_SIZE = (160, 160)
train_dataset = tf.keras.utils.image_dataset_from_directory(train_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
validation_dataset = tf.keras.utils.image_dataset_from_directory(validation_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
class_names = train_dataset.class_names
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
val_batches = tf.data.experimental.cardinality(validation_dataset)
test_dataset = validation_dataset.take(val_batches // 5)
validation_dataset = validation_dataset.skip(val_batches // 5)
print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
data_augmentation = tf.keras.Sequential([
tf.keras.layers.RandomFlip('horizontal'),
tf.keras.layers.RandomRotation(0.2),
])
for image, _ in train_dataset.take(1):
plt.figure(figsize=(10, 10))
first_image = image[0]
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
plt.imshow(augmented_image[0] / 255)
plt.axis('off')
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
rescale = tf.keras.layers.Rescaling(1./127.5, offset=-1)
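# Note: this Rescaling layer is an alternative to `preprocess_input` above; both
# map pixel values from [0, 255] to [-1, 1]. The model assembled below uses
# `preprocess_input`, so `rescale` is defined here for reference only.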
# Create the base model from the pre-trained model MobileNet V2
IMG_SHAPE = IMG_SIZE + (3,)
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
base_model.trainable = False
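# Freezing the base model keeps its weights (including the BatchNormalization
# statistics) fixed during training; the model built below also calls
# base_model(x, training=False) so the BatchNormalization layers run in
# inference mode.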
# Let's take a look at the base model architecture
base_model.summary()
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)
prediction_layer = tf.keras.layers.Dense(1)
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)
inputs = tf.keras.Input(shape=(160, 160, 3))
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
model.summary()
len(model.trainable_variables)
initial_epochs = 10
loss0, accuracy0 = model.evaluate(validation_dataset)
print("initial loss: {:.2f}".format(loss0))
print("initial accuracy: {:.2f}".format(accuracy0))
history = model.fit(train_dataset,
epochs=initial_epochs,
validation_data=validation_dataset)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
base_model.trainable = True
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer = tf.keras.optimizers.RMSprop(learning_rate=base_learning_rate/10),
metrics=['accuracy'])
model.summary()
len(model.trainable_variables)
fine_tune_epochs = 10
total_epochs = initial_epochs + fine_tune_epochs
history_fine = model.fit(train_dataset,
epochs=total_epochs,
initial_epoch=history.epoch[-1],
validation_data=validation_dataset)
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
loss, accuracy = model.evaluate(test_dataset)
print('Test accuracy :', accuracy)
# Retrieve a batch of images from the test set
image_batch, label_batch = test_dataset.as_numpy_iterator().next()
predictions = model.predict_on_batch(image_batch).flatten()
# Apply a sigmoid since our model returns logits
predictions = tf.nn.sigmoid(predictions)
predictions = tf.where(predictions < 0.5, 0, 1)
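# Values below 0.5 map to class 0 and the rest to class 1, matching the
# alphabetically ordered class_names ('cats', 'dogs') from the dataset loader.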
print('Predictions:\n', predictions.numpy())
print('Labels:\n', label_batch)
plt.figure(figsize=(10, 10))
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image_batch[i].astype("uint8"))
plt.title(class_names[predictions[i]])
plt.axis("off")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Transfer learning and fine-tuning
Step2: Data preprocessing
Step3: Show the first nine images and labels from the training set
Step4: As the original dataset doesn't contain a test set, you will create one. To do so, determine how many batches of data are available in the validation set using tf.data.experimental.cardinality, then move 20% of them to a test set.
Step5: Configure the dataset for performance
Step6: Use data augmentation
Step7: Note
Step8: Rescale pixel values
Step9: Note
Step10: Note
Step11: This feature extractor converts each 160x160x3 image into a 5x5x1280 block of features. Let's see what it does to an example batch of images
Step12: Feature extraction
Step13: Important note about BatchNormalization layers
Step14: Add a classification head
Step15: Apply a tf.keras.layers.Dense layer to convert these features into a single prediction per image. You don't need an activation function here because this prediction will be treated as a logit, or a raw prediction value. Positive numbers predict class 1, negative numbers predict class 0.
Step16: Build a model by chaining together the data augmentation, rescaling, base_model and feature extractor layers using the Keras Functional API. As previously mentioned, use training=False as our model contains a BatchNormalization layer.
Step17: Compile the model
Step18: The 2.5 million parameters in MobileNet are frozen, but there are 1.2 thousand trainable parameters in the Dense layer. These are divided between two tf.Variable objects, the weights and biases.
Step19: Train the model
Step20: Learning curves
Step21: Note
Step22: Compile the model
Step23: Continue training the model
Step24: Let's take a look at the learning curves of the training and validation accuracy/loss when fine-tuning the last few layers of the MobileNetV2 base model and training the classifier on top of it. The validation loss is much higher than the training loss, so you may get some overfitting.
Step25: Evaluation and prediction
Step26: And now you are all set to use this model to predict if your pet is a cat or dog.
|
6,233
|
<ASSISTANT_TASK:>
Python Code:
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(1,1),
activation='linear', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(100)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(1,1),
activation='linear', use_bias=False)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(101)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(2,2), padding='valid',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(102)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (7, 7, 3)
conv = Conv2D(5, (4,4), strides=(2,1), padding='valid',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(103)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.3'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='same',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(104)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.4'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 4, 2)
conv = Conv2D(4, (3,3), strides=(2,2), padding='same',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(105)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.5'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 3, 1)
conv = Conv2D(4, (3,3), strides=(3,2), padding='same',
data_format='channels_last', dilation_rate=(1,1),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(106)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.6'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(2,2),
activation='linear', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(100)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.7'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 5, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(2,2),
activation='linear', use_bias=False)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(101)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.8'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (7, 7, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='valid',
data_format='channels_last', dilation_rate=(3,3),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(102)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.9'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 8, 3)
conv = Conv2D(3, (4,4), strides=(1,1), padding='same',
data_format='channels_last', dilation_rate=(2,2),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(103)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.10'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (8, 8, 2)
conv = Conv2D(4, (3,3), strides=(1,1), padding='same',
data_format='channels_last', dilation_rate=(4,4),
activation='relu', use_bias=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = conv(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for w in model.get_weights():
np.random.seed(104)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
print('b shape:', weights[1].shape)
print('b:', format_decimal(weights[1].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.Conv2D.11'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
import os
filename = '../../../test/data/layers/convolutional/Conv2D.json'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
json.dump(DATA, f)
print(json.dumps(DATA))
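# Optional sanity check (illustrative sketch, not part of the exported fixtures):
# reload the file that was just written and confirm one entry round-trips.
with open(filename) as f:
    reloaded = json.load(f)
assert list(reloaded['convolutional.Conv2D.0']['expected']['shape']) == list(DATA['convolutional.Conv2D.0']['expected']['shape'])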
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [convolutional.Conv2D.1] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='valid', data_format='channels_last', dilation_rate=(1,1), activation='linear', use_bias=False
Step2: [convolutional.Conv2D.2] 4 3x3 filters on 5x5x2 input, strides=(2,2), padding='valid', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step3: [convolutional.Conv2D.3] 5 4x4 filters on 7x7x3 input, strides=(2,1), padding='valid', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step4: [convolutional.Conv2D.4] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='same', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step5: [convolutional.Conv2D.5] 4 3x3 filters on 4x4x2 input, strides=(2,2), padding='same', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step6: [convolutional.Conv2D.6] 4 3x3 filters on 6x3x1 input, strides=(3,2), padding='same', data_format='channels_last', dilation_rate=(1,1), activation='relu', use_bias=True
Step7: [convolutional.Conv2D.7] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='valid', data_format='channels_last', dilation_rate=(2,2), activation='linear', use_bias=True
Step8: [convolutional.Conv2D.8] 4 3x3 filters on 5x5x2 input, strides=(1,1), padding='valid', data_format='channels_last', dilation_rate=(2,2), activation='linear', use_bias=False
Step9: [convolutional.Conv2D.9] 4 3x3 filters on 7x7x2 input, strides=(1,1), padding='valid', data_format='channels_last', dilation_rate=(3,3), activation='relu', use_bias=True
Step10: [convolutional.Conv2D.10] 3 4x4 filters on 4x8x3 input, strides=(1,1), padding='same', data_format='channels_last', dilation_rate=(2,2), activation='relu', use_bias=True
Step11: [convolutional.Conv2D.11] 4 3x3 filters on 8x8x2 input, strides=(1,1), padding='same', data_format='channels_last', dilation_rate=(4,4), activation='relu', use_bias=True
Step12: export for Keras.js tests
|
6,234
|
<ASSISTANT_TASK:>
Python Code:
import os
import inspect
import sys
import pandas as pd
import charts
from opengrid.library import houseprint
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = 16,8
hp = houseprint.Houseprint()
# for testing:
# hp = houseprint.Houseprint(spreadsheet='unit and integration test houseprint')
hp
hp.sites[:5]
hp.get_devices()[:4]
hp.get_sensors('water')[:3]
hp.save('new_houseprint.pkl')
hp = houseprint.load_houseprint_from_file('new_houseprint.pkl')
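# Note: the pickled houseprint has no live tmpo session (socket connections
# cannot be pickled), so init_tmpo() is called again after loading.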
hp.init_tmpo()
hp._tmpos.debug = False
hp.sync_tmpos()
hp.find_site(1)
hp.find_device('FL03001556')
sensor = hp.find_sensor('d5a747b86224834f745f4c9775d70241')
print(sensor.site)
print(sensor.unit)
hp.search_sites(inhabitants=5)
hp.search_sensors(type='electricity', direction='Import')
head = pd.Timestamp('20151102')
tail = pd.Timestamp('20151103')
df = hp.get_data(sensortype='water', head=head,tail=tail, diff=True, resample='min', unit='l/min')
charts.plot(df, stock=True, show='inline')
site = hp.find_site(1)
site
print(site.size)
print(site.inhabitants)
print(site.postcode)
print(site.construction_year)
print(site.k_level)
print(site.e_level)
print(site.epc_cert)
site.devices
site.get_sensors('electricity')
head = pd.Timestamp('20150617')
tail = pd.Timestamp('20150628')
df=site.get_data(sensortype='electricity', head=head,tail=tail, diff=True, unit='kW')
charts.plot(df, stock=True, show='inline')
device = hp.find_device('FL03001552')
device
device.key
device.get_sensors('gas')
head = pd.Timestamp('20151101')
tail = pd.Timestamp('20151104')
df = hp.get_data(sensortype='gas', head=head,tail=tail, diff=True, unit='kW')
charts.plot(df, stock=True, show='inline')
sensor = hp.find_sensor('53b1eb0479c83dee927fff10b0cb0fe6')
sensor
sensor.key
sensor.type
sensor.description
sensor.system
sensor.unit
head = pd.Timestamp('20150617')
tail = pd.Timestamp('20150618')
df=sensor.get_data(head,tail,diff=True, unit='W')
charts.plot(df, stock=True, show='inline')
sensors = hp.search_sensors(type='electricity', system='solar')
print(sensors)
df = hp.get_data(sensors=sensors, head=head, tail=tail, diff=True, unit='W')
charts.plot(df, stock=True, show='inline')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Houseprint
Step2: A Houseprint object can be saved as a pickle. It loses its tmpo session however (connections cannot be pickled)
Step3: TMPO
Step4: Lookup sites, devices, sensors based on key
Step5: Lookup sites, devices, sensors based on search criteria
Step6: Get Data
Step7: Site
Step8: Device
Step9: Sensor
Step10: Getting data for a selection of sensors
|
6,235
|
<ASSISTANT_TASK:>
Python Code:
from resources.iot.device import IoT_sensor_consumer
from IPython.core.display import display
import ipywidgets as widgets
from resources.iot.device import IoT_mqtt_publisher, IoT_sensor
widgets.FloatProgress(value=30.0, min=0, max=100.0, bar_style='danger', orientation='vertical')
widgets.FloatProgress(value=60.0, min=0, max=100.0, bar_style='info', orientation='horizontal', description='pressão: ')
widgets.Label("isto é um label")
widget = widgets.FloatProgress(min=0, max=100.0, bar_style='info', orientation='vertical', description='exemplo') # 'success', 'info', 'warning', 'danger' or ''
widget_label = widgets.Label("isto é um label")
display(widget, widget_label)
consumer = IoT_sensor_consumer("localhost",1883,"sensor/+/+")
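# Note: '+' is the MQTT single-level wildcard, so "sensor/+/+" subscribes to
# every sensor id and every measurement type published under "sensor/".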
consumer.connect(widget, widget_label)
widget_1 = widgets.FloatProgress(min=0, max=40.0, bar_style='info', orientation='vertical')
widget_1_label = widgets.Label()
consumer_1 = IoT_sensor_consumer("localhost",1883,"sensor/1/+")
widget_2 = widgets.FloatProgress(min=0, max=90.0, bar_style='warning', orientation='vertical')
widget_2_label = widgets.Label()
consumer_2 = IoT_sensor_consumer("localhost",1883,"sensor/2/+")
widget_3 = widgets.FloatProgress(min=0, max=40.0, bar_style='info', orientation='vertical')
widget_3_label = widgets.Label()
consumer_3 = IoT_sensor_consumer("localhost",1883,"sensor/3/+")
widget_4 = widgets.FloatProgress(min=0, max=90.0, bar_style='warning', orientation='vertical')
widget_4_label = widgets.Label()
consumer_4 = IoT_sensor_consumer("localhost",1883,"sensor/4/+")
widget_avg = widgets.FloatProgress(min=0, max=90.0, bar_style='success', orientation='horizontal')
widget_avg_label = widgets.Label()
consumer_avg = IoT_sensor_consumer("localhost",1883,"sensor/*/temperature/avg")
separator = widgets.Label(value=" ---------- ")
col_1 = widgets.VBox([widget_1, widget_1_label])
col_2 = widgets.VBox([widget_2, widget_2_label])
col_3 = widgets.VBox([widget_3, widget_3_label])
col_4 = widgets.VBox([widget_4, widget_4_label])
col_5 = widgets.HBox([widget_avg, widget_avg_label])
row_1 = widgets.HBox([separator, col_1, separator, col_2, separator, col_3, separator, col_4])
row_2 = widgets.HBox([col_5])
display(row_1)
display(row_2)
consumer_1.connect(widget_1, widget_1_label)
consumer_2.connect(widget_2, widget_2_label)
consumer_3.connect(widget_3, widget_3_label)
consumer_4.connect(widget_4, widget_4_label)
consumer_avg.connect(widget_avg, widget_avg_label)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Progress bar
Step2: Label
Step3: Creating two visual components
Step4: Rendering the visual components
Step5: Creating a component that consumes MQTT messages
Step6: Connecting the components
Step7: Component dashboard
Step8: Creating consumer_2 and its widgets
Step9: Creating consumer_3 and its widgets
Step10: Creating consumer_4 and its widgets
Step11: Creating consumer_avg and its widgets
Step12: Arranging the components visually
Step13: Connecting the visual components to their respective consumers
|
6,236
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'csiro-bom', 'sandbox-2', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
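# Illustrative example only (placeholder details, not a real author):
# DOC.set_author("Jane Doe", "jane.doe@example.org")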
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
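# Illustrative example only: enter one of the listed choices verbatim, e.g.
# DOC.set_value("Specific timestepping (operator splitting)")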
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
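# Example (illustrative only): DOC.set_value(False)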
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
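# Example (illustrative only; for multi-valued properties, one DOC.set_value
# call per selected choice is assumed here):
# DOC.set_value("Prescribed CMIP6")
# DOC.set_value("Interactive")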
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Aod
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
6,237
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
import pickle
import matplotlib.pyplot as plt
def get_batch(cifar10_dataset_folder_path, batch_id):
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
return batch
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 5
my_batch=get_batch(cifar10_dataset_folder_path, batch_id)
print(type(my_batch))
print(my_batch.keys())
for key in my_batch:
print ('*', key, type(my_batch[key]))
for key in my_batch:
if key not in ['batch_label']: #since batch_label is a string
print ('**', key, 'length or size is: ',len(my_batch[key]))
print(my_batch['batch_label'])
print(my_batch['data'].shape)
print(my_batch['data'][0].shape)
print(my_batch['data'][0][0])
print ('***', 'Type of labels is: ',type(my_batch['labels'][0]), ' and type of filenames is: ', type(my_batch['filenames'][0]))
my_features, my_labels=helper.load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
print("In batch #",batch_id," no of features is: ",len(my_features), "and number of labels is: ",len(my_labels))
print('Unique labels are: ',set(my_labels))
print("Length of the first feature is: ",len(my_features[0]))
for feature in my_features[0]:
print (feature,"\n")
print ('First array in first feature is of length: ',len(my_features[0][0]))
print ('Array in First array in first feature is of length: ',len(my_features[0][0][0]))
print ('Thus each feature is a 32 x 32 x 3 matrix, which maps directly to a 32 x 32 RGB image')
print("Label # of the first feature is: ",my_labels[0],' which is a ', helper._load_label_names()[my_labels[0]], ', and file name is: ', my_batch['filenames'][0] )
plt.figure(1)
plt.imshow(my_features[0])
plt.show()
plt.figure(2)
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
plt.show()
print('Now we know the data...I think')
#import sklearn.preprocessing
def normalize(x):
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
# myway
x_max=np.amax(x)
x_min=np.amin(x)
# print(x_max, x_min)
norm_x = (x - x_min) / (x_max - x_min)
# print(np.amax(norm_x), np.amin(norm_x))
# print(norm_x)
return norm_x
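# Quick check (illustrative): with the min-max formula above,
# normalize(np.array([0, 51, 255])) -> array([0. , 0.2, 1. ])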
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
from sklearn.preprocessing import label_binarize
def one_hot_encode(x):
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
# TODO: Implement Function
x_hot=label_binarize(x, classes=np.arange(10))
print(x_hot)
return x_hot
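# Quick check (illustrative): one_hot_encode([0, 2]) returns a (2, 10) array
# with a single 1 per row, e.g. row 0 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0].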
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
# TODO: Implement Function - Done below
print(list(image_shape))
my_shape = (None,)+image_shape
x = tf.placeholder(tf.float32, shape=my_shape, name='x')  # float32 is used because downstream ops (conv, matmul, softmax) expect floating-point inputs
return x
def neural_net_label_input(n_classes):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
# TODO: Implement Function - Done below
my_classes=[None,n_classes]
y = tf.placeholder(tf.float32, my_classes, name="y")  # there are only 10 unique classes; float32 matches the logits' dtype for the cross-entropy loss
return y
def neural_net_keep_prob_input():
Return a Tensor for keep probability
: return: Tensor for keep probability.
# TODO: Implement Function
kp = tf.placeholder(tf.float32, name="keep_prob")
return kp
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernel size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernel size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
# TODO: Implement Function
print("x_tesor has shape:",x_tensor.shape)
print("x_tesor has depth:",x_tensor.shape[3])
print("number of outputs is:",conv_num_outputs)
print("kernel size is:",conv_ksize)
print("kernel stride size is:",conv_strides)
print("pool kernel size is:",pool_ksize)
print("pool stride size is:",pool_strides)
input_depth = x_tensor.shape[3].value
kernel_height, kernel_width = conv_ksize
weight = tf.Variable(tf.truncated_normal((kernel_height, kernel_width, input_depth, conv_num_outputs)))
bias = tf.Variable(tf.zeros(conv_num_outputs))
c_height, c_width = conv_strides
conv_layer = tf.nn.conv2d(x_tensor, weight, strides=[1, c_height, c_width, 1], padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
k_height, k_width = pool_ksize
p_height, p_width = pool_strides
conv_layer = tf.nn.max_pool(conv_layer, ksize=[1, k_height, k_width, 1], strides=[1, p_height, p_width, 1], padding='SAME')
return conv_layer
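# Shape check (illustrative, for the hyperparameters used below): a 32x32 input
# with SAME padding and conv stride (2, 2) gives ceil(32/2) = 16, and the max
# pool with stride (3, 3) then gives ceil(16/3) = 6, i.e. a 6x6 feature map.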
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE - Done
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
# TODO: Implement Function
print(x_tensor.shape)
print(tf.contrib.layers.flatten(x_tensor).shape)
return tf.contrib.layers.flatten(x_tensor)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
print(tf.contrib.layers.fully_connected(x_tensor, num_outputs).shape)
return tf.contrib.layers.fully_connected(x_tensor, num_outputs)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
return tf.layers.dense(inputs=x_tensor, units=num_outputs, activation=None)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
#let's say I want conv_num_outputs to be 10,conv_ksize (3,3), conv_strides to be (2,2), pool_ksize (4,4) and pool_strides(3,3)
x = conv2d_maxpool(x, 10, (3, 3), (2, 2), (4, 4), (3, 3))
x = tf.nn.dropout(x, keep_prob)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
x = flatten(x)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
x = fully_conn(x, 512)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
x = tf.nn.dropout(x, keep_prob)
# TODO: return output
return output(x, 10)
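# Resulting tensor shapes for a (batch, 32, 32, 3) input (illustrative):
# conv+pool -> (batch, 6, 6, 10), flatten -> (batch, 360),
# fully connected -> (batch, 512), output logits -> (batch, 10).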
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name the logits Tensor so that it can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
# TODO: Implement Function - Done
session.run(optimizer, feed_dict={keep_prob: keep_probability, x: feature_batch, y: label_batch})
pass
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function -Done
loss = session.run(cost, feed_dict={ x: feature_batch, y: label_batch, keep_prob: 1.})
valid_acc = session.run(accuracy, feed_dict={ x: valid_features, y: valid_labels, keep_prob: 1.})
print('| Loss: {:>10.4f} | Validation Accuracy: {:.6f}'.format(loss, valid_acc))
# TODO: Tune Parameters
epochs = 38  # tried different numbers of epochs; the best results (Accuracy: 0.557200, 0.558600, 0.563800) came with 37-38
batch_size = 100  # a batch size of 50 or 100 works about as well as it gets
keep_probability = 0.70
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Interesting Results
|
6,238
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
plt.rcParams = plt.rcParamsOrig
from tqdm.notebook import trange
class HMM:
def __init__(self, p_start, p_trans, p_emit, p_stop=None):
assert p_trans.shape[0] == p_emit.shape[0]
self.p_start = p_start
self.p_trans = p_trans
self.p_emit = p_emit
if p_stop is not None:
self.p_stop = p_stop
else:
self.p_stop = np.ones(self.p_trans.shape[0]) # fill the stopping probabilities with 1
def forward(self, sequence: list):
a = []
for i, x in enumerate(sequence):
if i == 0:
a.append((self.p_start * self.p_emit[x]).values)
else:
a.append(
a[i-1].dot(
self.p_trans.dot(np.diag(self.p_emit[x]))
)
)
self.a = np.array(a)
return self
def backward(self, sequence: list):
b = []
prev_x = sequence[-1]
for i, x in enumerate(sequence[::-1]): # read the observations from back to front
if i == 0:
b.append(self.p_stop)
else:
b.append(
b[i-1].dot(
np.diag(self.p_emit[prev_x]).dot(
self.p_trans.T
)
)
)
prev_x = x
self.b = np.array(b[::-1]) # restore β to forward order
return self
def forward_backward(self, sequence: list):
return self.forward(sequence).backward(sequence).normalised_gamma
@property
def gamma(self):
γ = α * β
Returns the value of γ at every state.
pass
@property
def normalised_gamma(self):
Normalise γ to obtain probabilities.
All values in a row sum to 1.
pass
def log_probability(self, seq: list) -> float:
Run forward-backward to obtain the γ values.
Return the log of the sum of the γ values at any one state.
Note: the total of the γ values is the same at every state.
pass
def fit(self, seq: list, max_iter=100):
Update p_emit based on the observed sequence.
Does not return anything.
pass
def predict(self, seq):
return self.forward_backward(seq).argmax(axis=1)
def predict_proba(self, seq):
return self.forward_backward(seq)
# Test case - do not modify!
p_emit = pd.DataFrame({
's1': {'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25},
's2': {'A': 0.10, 'C': 0.40, 'G': 0.40, 'T': 0.10}
}).T
p_trans = pd.DataFrame({
's1': {'s1': 0.89, 's2': 0.10},
's2': {'s1': 0.1, 's2': 0.9}
}).T
mdl = HMM(
p_start=np.array([0.5, 0.5]),
p_trans=p_trans,
p_emit=p_emit,
p_stop=np.array([0.01, 0.])
)
seq = list('CGACTACTGACTACTCGCCGACGCGACTGCCGTCTATACTGCGCATACGGC')
hmm_predictions = mdl.predict(seq)
print("sequence: {}".format(''.join(seq)))
print("hmm pred: {}".format(''.join(map(str, hmm_predictions))))
assert ''.join(map(str, hmm_predictions)) == "000000000000000111111111111111100000000000000000000"
assert np.allclose(mdl.forward_backward(seq).sum(axis=1), np.ones(len(seq)))
assert np.isclose(mdl.log_probability(seq), -76.1486)
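# Sanity note (illustrative): γ is the element-wise product α * β, re-normalised
# per position, which is why every row of mdl.predict_proba(seq) sums to 1
# (checked by the np.allclose assertion above).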
import string
def translate(states: list):
pass
cipher_text = 'cipx qidnsi xdyedlsxi yldnid xdx pldtizicid clplwylciid xdyedlsxi bim bim tidn pldnldix vlpxdyibid clcuisiid yid mixd mixd yxslmldnniwicid yldnid aiwi slcsipi yid yimip zlpve tidn slsxdnciz sxdncizdti'
K = 27
p_trans = pd.read_csv('https://raw.githubusercontent.com/aliakbars/uai-ai/master/datasets/p_trans.csv', index_col=0)
# Your code here
import re
articles = pd.read_csv(
'https://raw.githubusercontent.com/aliakbars/uai-ai/master/datasets/ind_news_2012_10K-sentences.txt',
sep='\t', header=None, quoting=3
)[1][:2000]
articles = articles.str.lower().apply(lambda x: re.findall("[a-z]+", x))
articles
from collections import Counter
from nltk import ngrams
# Your code here
# Your code here
# This code is used for Laplace smoothing
dfc = (
df
.set_index(['w1','w2'])
.freq
.unstack(fill_value=0)
.stack()
.reset_index(name='freq')
)
dfc['freq'] += 1
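# With add-one (Laplace) smoothing, the bigram probability becomes
# P(w2 | w1) = (count(w1, w2) + 1) / (count(w1) + V), where V is the
# vocabulary size, so unseen bigrams get a small non-zero probability.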
def get_next_word(dfc, w):
pass
def get_next_random_word(dfc, w, n=5):
pass
def generate_sentence(dfc, n=10):
pass
generate_sentence(dfc)
# Your code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: 1. Hidden Markov Model (10 points)
Step6: The example case below is adapted from here. It uses an HMM to find DNA sequences that are rich in CG nucleotides. This case is a simplification of a real biological problem in DNA sequencing.
Step7: Problem 1.2 (2 points)
Step8: Problem 1.3.b (bonus - 3 points)
Step9: 2. Natural Language Processing (10 points)
Step10: Problem 2.1 (2 points)
Step11: Problem 2.2 (2 points)
Step12: Problem 2.3 (2 points)
Step13: Problem 2.4 (2 points)
Step14: Problem 2.5 (2 points)
Step15: Problem 2.6 (bonus - 5 points)
|
6,239
|
<ASSISTANT_TASK:>
Python Code:
# Author: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
from scipy.misc import imread
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import spm_face
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.cov import compute_covariance
print(__doc__)
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'
raw = io.read_raw_ctf(raw_fname % 1) # Take first run
# To save time and memory for this demo, we'll just use the first
# 2.5 minutes (all we need to get 30 total events) and heavily
# resample 480->60 Hz (usually you wouldn't do either of these!)
raw = raw.crop(0, 150.).load_data().resample(60, npad='auto')
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, None, method='iir', n_jobs=1)
events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.5
baseline = None # no baseline as high-pass is applied
reject = dict(mag=3e-12)
# Make source space
trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
src = mne.setup_source_space('spm', fname=None, spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(raw.info, trans, src, bem)
forward = mne.convert_forward_solution(forward, surf_ori=True)
del src
# inverse parameters
conditions = 'faces', 'scrambled'
snr = 3.0
lambda2 = 1.0 / snr ** 2
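# lambda2 = 1 / SNR^2 is the usual regularisation for MNE-type inverse solvers;
# a higher assumed SNR means weaker regularisation.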
method = 'dSPM'
clim = dict(kind='value', lims=[0, 2.5, 5])
samples_epochs = 5, 15,
cov_methods = 'empirical', 'shrunk'  # covariance estimators to compare
colors = 'steelblue', 'red'
evokeds = list()
stcs = list()
methods_ordered = list()
for n_train in samples_epochs:
# estimate covs based on a subset of samples
# make sure we have the same number of conditions.
events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
for id_ in [event_ids[k] for k in conditions]])
epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject)
epochs_train.equalize_event_counts(event_ids, copy=False)
assert len(epochs_train) == 2 * n_train
noise_covs = compute_covariance(
epochs_train, method=cov_methods, tmin=None, tmax=0,  # baseline only
return_estimators=True) # returns list
# prepare contrast
evokeds = [epochs_train[k].average() for k in conditions]
del epochs_train, events_
# do contrast
# We skip empirical rank estimation that we introduced in response to
# the findings in reference [1] to use the naive code path that
# triggered the behavior described in [1]. The expected true rank is
# 274 for this dataset. Please do not do this with your data but
# rely on the default rank estimator that helps regularizing the
# covariance.
stcs.append(list())
methods_ordered.append(list())
for cov in noise_covs:
inverse_operator = make_inverse_operator(evokeds[0].info, forward,
cov, loose=0.2, depth=0.8,
rank=274)
stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
pick_ori=None) for e in evokeds)
stc = stc_a - stc_b
methods_ordered[-1].append(cov['method'])
stcs[-1].append(stc)
del inverse_operator, evokeds, cov, noise_covs, stc, stc_a, stc_b
del raw, forward # save some memory
fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 6))
def brain_to_mpl(brain):
convert image to be usable with matplotlib
tmp_path = op.abspath(op.join(op.curdir, 'my_tmp'))
brain.save_imageset(tmp_path, views=['ven'])
im = imread(tmp_path + '_ven.png')
os.remove(tmp_path + '_ven.png')
return im
for ni, (n_train, axes) in enumerate(zip(samples_epochs, (axes1, axes2))):
# compute stc based on worst and best
ax_dynamics = axes[1]
for stc, ax, method, kind, color in zip(stcs[ni],
axes[::2],
methods_ordered[ni],
['best', 'worst'],
colors):
brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim)
brain.set_time(175)
im = brain_to_mpl(brain)
brain.close()
del brain
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(im)
ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
# plot spatial mean
stc_mean = stc.data.mean(0)
ax_dynamics.plot(stc.times * 1e3, stc_mean,
label='{0} ({1})'.format(method, kind),
color=color)
# plot spatial std
stc_var = stc.data.std(0)
ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
stc_mean + stc_var, alpha=0.2, color=color)
# signal dynamics worst and best
ax_dynamics.set_title('{0} epochs'.format(n_train * 2))
ax_dynamics.set_xlabel('Time (ms)')
ax_dynamics.set_ylabel('Source Activation (dSPM)')
ax_dynamics.set_xlim(tmin * 1e3, tmax * 1e3)
ax_dynamics.set_ylim(-3, 3)
ax_dynamics.legend(loc='upper left', fontsize=10)
fig.subplots_adjust(hspace=0.4, left=0.03, right=0.98, wspace=0.07)
fig.canvas.draw()
fig.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get data
Step2: Estimate covariances
Step4: Show the resulting source estimates
|
6,240
|
<ASSISTANT_TASK:>
Python Code:
import modin.pandas as pd
import pandas
import numpy as np
import time
frame_data = np.random.randint(0, 100, size=(2**18, 2**8))
df = pd.DataFrame(frame_data).add_prefix("col")
pandas_df = pandas.DataFrame(frame_data).add_prefix("col")
modin_start = time.time()
print(df.mask(df < 50))
modin_end = time.time()
print("Modin mask took {} seconds.".format(round(modin_end - modin_start, 4)))
pandas_start = time.time()
print(pandas_df.mask(pandas_df < 50))
pandas_end = time.time()
print("pandas mask took {} seconds.".format(round(pandas_end - pandas_start, 4)))
from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
from modin.core.dataframe.algebra import TreeReduce
PandasQueryCompiler.neg_kurtosis_custom = TreeReduce.register(lambda cell_value, **kwargs: ~cell_value,
pandas.DataFrame.kurtosis)
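# TreeReduce.register(map_fn, reduce_fn) builds a query-compiler method that
# applies map_fn to every partition and then reduces with reduce_fn, so here
# the negation and the kurtosis run in a single pass over the partitions.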
from pandas._libs import lib
# The function signature came from the pandas documentation:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.kurtosis.html
def neg_kurtosis_func(self, axis=lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs):
# We need to specify the axis for the query compiler
if axis in [None, lib.no_default]:
axis = 0
# The constructor allows you to pass in a query compiler as a keyword argument
# Reduce dimension is used for reduces
# We also pass all keyword arguments here to ensure correctness
return self._reduce_dimension(
self._query_compiler.neg_kurtosis_custom(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
)
pd.DataFrame.neg_kurtosis_custom = neg_kurtosis_func
start = time.time()
print(pandas_df.applymap(lambda cell_value: ~cell_value).kurtosis())
end = time.time()
pandas_duration = end - start
print("pandas unary negation kurtosis took {} seconds.".format(pandas_duration))
start = time.time()
print(df.applymap(lambda x: ~x).kurtosis())
end = time.time()
modin_duration = end - start
print("Modin unary negation kurtosis took {} seconds.".format(modin_duration))
custom_start = time.time()
print(df.neg_kurtosis_custom())
custom_end = time.time()
modin_custom_duration = custom_end - custom_start
print("Modin neg_kurtosis_custom took {} seconds.".format(modin_custom_duration))
from IPython.display import Markdown, display
display(Markdown("### As expected, Modin is {}x faster than pandas when chaining the functions; however we see that our custom function is even faster than that - beating pandas by {}x, and Modin (when chaining the functions) by {}x!".format(round(pandas_duration / modin_duration, 2), round(pandas_duration / modin_custom_duration, 2), round(modin_duration / modin_custom_duration, 2))))
modin_mad_custom_start = time.time()
# Implement your function here! Put the result of your custom squared `mad` in the variable `modin_mad_custom`
# Hint: Look at the kurtosis walkthrough above
modin_mad_custom = ...
print(modin_mad_custom)
modin_mad_custom_end = time.time()
# Evaluation code, do not change!
modin_mad_start = time.time()
modin_mad = df.applymap(lambda x: x**2).mad()
print(modin_mad)
modin_mad_end = time.time()
assert modin_mad_end - modin_mad_start > modin_mad_custom_end - modin_mad_custom_start, \
"Your implementation was too slow, or you used the chaining functions approach. Try again"
assert modin_mad._to_pandas().equals(modin_mad_custom._to_pandas()), "Your result did not match the result of chaining the functions, try again"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Concept for exercise
Step2: Speed improvements
Step3: Congratulations! You have just implemented new DataFrame functionality!
|
6,241
|
<ASSISTANT_TASK:>
Python Code:
! pip3 install -U google-cloud-automl --user
! pip3 install google-cloud-storage
import os
if not os.getenv("AUTORUN"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" #@param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = 'us-central1' #@param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your Google Cloud account. This provides access
# to your Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If running on Google Cloud Notebooks, the environment is already authenticated, so skip this code
if not os.path.exists('/opt/deeplearning/metadata/env_version'):
if 'google.colab' in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this tutorial in a notebook locally, replace the string
# below with the path to your service account key and run this cell to
# authenticate your Google Cloud account.
else:
%env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json
# Log in to your account on Google Cloud
! gcloud auth login
BUCKET_NAME = "[your-bucket-name]" #@param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION gs://$BUCKET_NAME
! gsutil ls -al gs://$BUCKET_NAME
import json
import os
import sys
import time
from google.cloud import automl
from google.protobuf.json_format import MessageToJson
from google.protobuf.json_format import ParseDict
from googleapiclient.discovery import build
# AutoML location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
def automl_client():
return automl.AutoMlClient()
def prediction_client():
return automl.PredictionServiceClient()
def operations_client():
return automl.AutoMlClient()._transport.operations_client
clients = {}
clients["automl"] = automl_client()
clients["prediction"] = prediction_client()
clients["operations"] = operations_client()
for client in clients.items():
print(client)
IMPORT_FILE = "gs://cloud-ml-data/img/flower_photos/train_set.csv"
#%%capture
! gsutil cp -r gs://cloud-ml-data/img/flower_photos/ gs://$BUCKET_NAME
import tensorflow as tf
all_files_csv = ! gsutil cat $IMPORT_FILE
all_files_csv = [ l.replace("cloud-ml-data/img", BUCKET_NAME) for l in all_files_csv ]
IMPORT_FILE = "gs://" + BUCKET_NAME + "/flower_photos/train_set.csv"
with tf.io.gfile.GFile(IMPORT_FILE, 'w') as f:
for l in all_files_csv:
f.write(l + "\n")
! gsutil cat $IMPORT_FILE | head -n 10
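# Each row of the import CSV is expected to look like "gs://path/to/image.jpg,label"
# (illustrative; the actual rows come from the flower_photos dataset copied above).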
dataset = {
"display_name": "flowers_" + TIMESTAMP,
"image_classification_dataset_metadata": {
"classification_type": "MULTICLASS",
},
}
print(MessageToJson(
automl.CreateDatasetRequest(
parent=PARENT,
dataset=dataset,
).__dict__["_pb"])
)
request = clients["automl"].create_dataset(
parent=PARENT,
dataset=dataset,
)
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split('/')[-1]
print(dataset_id)
input_config = {
"gcs_source": {
"input_uris": [IMPORT_FILE],
},
}
print(MessageToJson(
automl.ImportDataRequest(
name=dataset_short_id,
input_config=input_config
).__dict__["_pb"])
)
request = clients["automl"].import_data(
name=dataset_id,
input_config=input_config
)
result = request.result()
print(MessageToJson(result))
model = {
"display_name": "flowers_" + TIMESTAMP,
"dataset_id": dataset_short_id,
"image_classification_model_metadata": {
"train_budget_milli_node_hours": 8000,
},
}
print(MessageToJson(
automl.CreateModelRequest(
parent=PARENT,
model=model,
).__dict__["_pb"])
)
request = clients["automl"].create_model(
parent=PARENT,
model=model,
)
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
# The full unique ID for the training pipeline
model_id = result.name
# The short numeric ID for the training pipeline
model_short_id = model_id.split('/')[-1]
print(model_short_id)
request = clients["automl"].list_model_evaluations(
parent=model_id,
)
import json
model_evaluations = [
json.loads(MessageToJson(me.__dict__["_pb"])) for me in request
]
# The evaluation slice
evaluation_slice = request.model_evaluation[0].name
print(json.dumps(model_evaluations, indent=2))
request = clients["automl"].get_model_evaluation(
name=evaluation_slice,
)
print(MessageToJson(request.__dict__["_pb"]))
test_items = !gsutil cat $IMPORT_FILE | head -n2
if len(str(test_items[0]).split(',')) == 3:
_, test_item_1, test_label_1 = str(test_items[0]).split(',')
_, test_item_2, test_label_2 = str(test_items[1]).split(',')
else:
test_item_1, test_label_1 = str(test_items[0]).split(',')
test_item_2, test_label_2 = str(test_items[1]).split(',')
print(test_item_1, test_label_1)
print(test_item_2, test_label_2)
file_1 = test_item_1.split('/')[-1]
file_2 = test_item_2.split('/')[-1]
! gsutil cp $test_item_1 gs://$BUCKET_NAME/$file_1
! gsutil cp $test_item_2 gs://$BUCKET_NAME/$file_2
test_item_1 = "gs://" + BUCKET_NAME + "/" + file_1
test_item_2 = "gs://" + BUCKET_NAME + "/" + file_2
import tensorflow as tf
import json
gcs_input_uri = "gs://" + BUCKET_NAME + '/test.csv'
with tf.io.gfile.GFile(gcs_input_uri, 'w') as f:
f.write(test_item_1 + '\n')
f.write(test_item_2 + '\n')
!gsutil cat $gcs_input_uri
input_config = {
"gcs_source": {
"input_uris": [gcs_input_uri]
},
}
output_config = {
"gcs_destination": {
"output_uri_prefix": "gs://" + f"{BUCKET_NAME}/batch_output/"
}
}
batch_prediction = automl.BatchPredictRequest(
name=model_id,
input_config=input_config,
output_config=output_config
)
print(MessageToJson(
batch_prediction.__dict__["_pb"])
)
request = clients["prediction"].batch_predict(
request=batch_prediction
)
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
destination_uri = batch_prediction.output_config.gcs_destination.output_uri_prefix[:-1]
! gsutil ls $destination_uri/*
! gsutil cat $destination_uri/prediction*/*.jsonl
request = clients["automl"].deploy_model(
name=model_id
)
result = request.result()
print(MessageToJson(result))
test_item = !gsutil cat $IMPORT_FILE | head -n1
test_item = test_item[0].split(",")[0]
with tf.io.gfile.GFile(test_item, "rb") as f:
content = f.read()
payload = [{
"image": {
"image_bytes": content
}
}]
params = {"score_threshold": "0.8"}
prediction_r = automl.PredictRequest(
name=model_id,
payload=payload,
params=params
)
print(MessageToJson(
automl.PredictRequest(
name=model_id,
payload=payload,
params=params
).__dict__["_pb"])
)
request = clients["prediction"].predict(
name=model_id,
payload=payload,
params=params
)
print(MessageToJson(request.__dict__["_pb"]))
request = clients["automl"].undeploy_model(
name=model_id
)
result = request.result()
print(MessageToJson(result))
# creating edge model for export
model_edge = {
"display_name": "flowers_edge_" + TIMESTAMP,
"dataset_id": dataset_short_id,
"image_classification_model_metadata":{
"train_budget_milli_node_hours": 8000,
"model_type": "mobile-versatile-1"
},
}
print(
MessageToJson(
automl.CreateModelRequest(
parent=PARENT,
model=model_edge
).__dict__["_pb"]
)
)
request = clients["automl"].create_model(
parent=PARENT,
model=model_edge
)
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
model_edge_id = result.name
output_config = {
"model_format": "tflite",
"gcs_destination": {
"output_uri_prefix": "gs://" + f"{BUCKET_NAME}/export/",
}
}
print(MessageToJson(
automl.ExportModelRequest(
name=model_edge_id,
output_config=output_config
).__dict__["_pb"])
)
request = clients["automl"].export_model(
name=model_edge_id,
output_config=output_config
)
result = request.result()
print(MessageToJson(result))
model_export_dir = output_config["gcs_destination"]["output_uri_prefix"]
! gsutil ls -r $model_export_dir
delete_dataset = True
delete_model = True
delete_bucket = True
# Delete the dataset using the AutoML fully qualified identifier for the dataset
try:
if delete_dataset:
clients['automl'].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the model using the AutoML fully qualified identifier for the model
try:
if delete_model:
clients['automl'].delete_model(name=model_id)
except Exception as e:
print(e)
# Delete the model using the AutoML fully qualified identifier for the model
try:
if delete_model:
clients['automl'].delete_model(name=model_edge_id)
except Exception as e:
print(e)
if delete_bucket and 'BUCKET_NAME' in globals():
! gsutil rm -r gs://$BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the Google cloud-storage library as well.
Step2: Restart the Kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your GCP account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: AutoML constants
Step12: Clients
Step13: Example output
Step14: Example output
Step15: Response
Step16: Example output
Step17: projects.locations.datasets.importData
Step18: Example output
Step19: Response
Step20: Example output
Step21: Example output
Step22: Response
Step23: Example output
Step24: Evaluate the model
Step25: Response
Step26: Example output
Step27: Response
Step28: Example output
Step29: Example output
Step30: Make the batch input file
Step31: Example output
Step32: Example output
Step33: Response
Step34: Example output
Step35: Example output
Step36: Response
Step37: Example output
Step38: Example output
Step39: Response
Step40: Example output
Step41: Call
Step42: Response
Step43: Example output
Step44: Example output
Step45: Response
Step46: Example output
Step47: projects.locations.models.export
Step48: Example output
Step49: Response
Step50: Example output
Step51: Example output
|
6,242
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import timeit
import aurora as au # import Aurora
import aurora.autodiff as ad # importing Aurora's automatic differentiation framework
import matplotlib.pyplot as plt
import seaborn as sbn
sbn.set()
BATCH_SIZE = 64
LR = 1e-4
USE_GPU = False
NUM_ITERS = 20
# utility functions
def display_images(image_dataset, n_rows, n_cols, graph_title='Sample Training Images'):
'''
Simple utility function for displaying images.
'''
plt.figure(figsize=(7, 5))
image_number = 1
for row in range(n_rows):
for col in range(n_cols):
plt.subplot(n_rows, n_cols, image_number)
plt.imshow(image_dataset[image_number-1, :], cmap='Greys_r')
plt.axis('off')
image_number += 1
plt.suptitle(graph_title)
plt.show()
def measure_accuracy(activation, X_val, y_val, batch_size=32, use_gpu=USE_GPU):
executor = ad.Executor([activation], use_gpu=use_gpu)
max_val = len(X_val) - len(X_val) % batch_size
y_val = y_val[0:max_val]
prediction = np.zeros(max_val)
for i in range(0, max_val, batch_size):
start = i
end = i + batch_size
X_batch, y_batch = X_val[start:end], y_val[start:end]
prob_val, = executor.run(feed_shapes={images: X_batch})
if use_gpu:
prob_val = prob_val.asnumpy()
prediction[start:end] = np.argmax(prob_val, axis=1)
correct = np.sum(np.equal(y_val, prediction))
percentage = (correct / len(prediction)) * 100.00
return percentage
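# Note: accuracy is computed in fixed-size batches; any trailing examples that
# do not fill a complete batch are dropped (max_val above), which slightly
# undercounts the evaluation set.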
data = au.datasets.MNIST(batch_size=BATCH_SIZE)
batch_generator = data.train_batch_generator()
batch = next(batch_generator)
sample = batch[0][0:15, :]
display_images(sample.reshape(-1, 28, 28), 3, 5)
def build_network(image, y, batch_size=32):
rand = np.random.RandomState(seed=1024)
reshaped_images = ad.reshape(image, newshape=(batch_size, 1, 28, 28))
# weight in (number_kernels, color_depth, kernel_height, kernel_width)
W1 = ad.Parameter(name='W1', init=rand.normal(scale=0.1, size=(10, 1, 5, 5)))
b1 = ad.Parameter(name='b1', init=rand.normal(scale=0.1, size=10))
conv1 = au.nn.conv2d(input=reshaped_images, filter=W1, bias=b1)
activation1 = au.nn.relu(conv1)
# size of activation1: batch_size x 10 x 24 x 24
# weight in (number_kernels, number_kernels of previous layer, kernel_height, kernel_width)
W2 = ad.Parameter(name='W2', init=rand.normal(scale=0.1, size=(5, 10, 5, 5)))
b2 = ad.Parameter(name='b2', init=rand.normal(scale=0.1, size=5))
conv2 = au.nn.conv2d(input=activation1, filter=W2, bias=b2)
activation2 = au.nn.relu(conv2)
# size of activation2: batch_size x 5 x 20 x 20 = batch_size x 2000
flatten = ad.reshape(activation2, newshape=(batch_size, 2000))
W3 = ad.Parameter(name='W3', init=rand.normal(scale=0.1, size=(2000, 500)))
b3 = ad.Parameter(name='b3', init=rand.normal(scale=0.1, size=500))
Z3 = ad.matmul(flatten, W3)
Z3 = Z3 + ad.broadcast_to(b3, Z3)
activation3 = au.nn.relu(Z3)
W4 = ad.Parameter(name='W4', init=rand.normal(scale=0.1, size=(500, 10)))
b4 = ad.Parameter(name='b4', init=rand.normal(scale=0.1, size=10))
logits = ad.matmul(activation3, W4)
logits = logits + ad.broadcast_to(b4, logits)
loss = au.nn.cross_entropy_with_logits(logits, y)
return loss, W1, b1, W2, b2, W3, b3, W4, b4, logits
n_iter = NUM_ITERS
start = timeit.default_timer()
data = au.datasets.MNIST(batch_size=BATCH_SIZE)
batch_generator = data.train_batch_generator()
# images in (batch_size, color_depth, height, width)
images = ad.Variable(name='images')
labels = ad.Variable(name='y')
loss, W1, b1, W2, b2, W3, b3, W4, b4, logits = build_network(images, labels, batch_size=64)
opt_params = [W1, b1, W2, b2, W3, b3, W4, b4]
optimizer = au.optim.Adam(loss, params=opt_params, lr=1e-4, use_gpu=USE_GPU)
training_errors = []
validation_errors = []
for i in range(n_iter):
X_batch, y_batch = next(batch_generator)
loss_now = optimizer.step(feed_dict={images: X_batch, labels: y_batch})
if i <= 10 or (i <= 100 and i % 10 == 0) or (i <= 1000 and i % 100 == 0) or (i <= 10000 and i % 500 == 0):
fmt_str = 'iter: {0:>5d} cost: {1:>8.5f}'
print(fmt_str.format(i, loss_now[0]))
if i % 10 == 0:
train_acc = measure_accuracy(logits, X_batch, np.argmax(y_batch, axis=1),
batch_size=BATCH_SIZE, use_gpu=USE_GPU)
training_errors.append((100.0 - train_acc))
X_valid, y_valid = data.validation()
valid_acc = measure_accuracy(logits, X_valid[0:BATCH_SIZE], y_valid[0:BATCH_SIZE],
batch_size=BATCH_SIZE, use_gpu=USE_GPU)
        validation_errors.append((100.0 - valid_acc))
X_valid, y_valid = data.validation()
val_acc = measure_accuracy(logits, X_valid, y_valid, batch_size=BATCH_SIZE, use_gpu=USE_GPU)
print('Validation accuracy: {:>.2f}'.format(val_acc))
X_test, y_test = data.testing()
test_acc = measure_accuracy(logits, X_test, y_test, batch_size=BATCH_SIZE, use_gpu=USE_GPU)
print('Testing accuracy: {:>.2f}'.format(test_acc))
plt.plot(validation_errors, color='r', label='validation error')
plt.plot(training_errors, color='b', label='training error')
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's Explore the Dataset
Step2: Building the Computational Graph
Step3: Training Our Model
Step4: Reporting Testing Accuracy and Plotting Training/Validation Errors
|
6,243
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.datasets import fetch_olivetti_faces
random_state = 32
dataset = fetch_olivetti_faces(shuffle=True, random_state=random_state)
X, y = dataset['data'], dataset['target']
n_x, n_y = dataset['images'][0].shape
X_data = X.reshape(-1, n_x, n_y).transpose(1, 2, 0)
from sklearn.feature_extraction.image import grid_to_graph
from rena import weighted_connectivity_graph
connectivity_ward = grid_to_graph(n_x, n_y, 1)
mask = np.ones((n_x, n_y))
connectivity_rena = weighted_connectivity_graph(X_data, n_features=X.shape[1],
mask=mask)
import time
from sklearn.cluster import AgglomerativeClustering
from rena import recursive_nearest_agglomeration
n_clusters = 150
ward = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity_ward,
linkage='ward')
ti_ward = time.clock()
ward.fit(X.T)
to_ward = time.clock() - ti_ward
labels_ward = ward.labels_
ti_rena = time.clock()
labels_rena = recursive_nearest_agglomeration(X, connectivity_rena,
n_clusters=n_clusters)
to_rena = time.clock() - ti_rena
print('Time Ward: %0.3f, Time ReNA: %0.3f' % (to_ward, to_rena))
from rena import reduce_data, approximate_data
X_red_rena = reduce_data(X, labels_rena)
X_red_ward = reduce_data(X, labels_ward)
X_approx_rena = approximate_data(X_red_rena, labels_rena)
X_approx_ward = approximate_data(X_red_ward, labels_ward)
%matplotlib inline
import matplotlib.pyplot as plt
fig, axx = plt.subplots(3, 4, **{'figsize': (10, 5)})
plt.gray()
for i in range(4):
axx[0, i].imshow(X[i + 30].reshape(n_x, n_y))
axx[0, i].set_axis_off()
axx[0, 0].set_title('Original')
axx[1, i].imshow(X_approx_ward[i + 30].reshape(n_x, n_y))
axx[1, i].set_axis_off()
axx[1, 0].set_title('Ward: approximated')
axx[2, i].imshow(X_approx_rena[i + 30].reshape(n_x, n_y))
axx[2, i].set_axis_off()
axx[2, 0].set_title('ReNA: approximated')
# saving results
fig.savefig('figures/faces.png', bbox_to_inches='tight')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the connectivity (spatial structure)
Step2: Clustering (a note on the achieved compression follows below)
Step3: Results visualization
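For context on the reduction, the compression factor follows directly from the sizes already defined in the code (the Olivetti images are 64 x 64, i.e. 4096 features):
compression = (n_x * n_y) / n_clusters
print('Compression factor: %.1f' % compression)  # 4096 / 150 ~ 27.3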
|
6,244
|
<ASSISTANT_TASK:>
Python Code:
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
# Use Floyd's cifar-10 dataset if present
floyd_cifar10_location = '/input/cifar-10/python.tar.gz'
if isfile(floyd_cifar10_location):
tar_gz_path = floyd_cifar10_location
else:
tar_gz_path = 'cifar-10-python.tar.gz'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(tar_gz_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
tar_gz_path,
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open(tar_gz_path) as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 3
sample_id = 1
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
    """
    Normalize a list of sample image data in the range of 0 to 1
    : x: List of image data. The image shape is (32, 32, 3)
    : return: Numpy array of normalized data
    """
x = x.astype(float)
x /= 255
return x
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
from sklearn import preprocessing
def one_hot_encode(x):
    """
    One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
    : x: List of sample labels
    : return: Numpy array of one-hot encoded labels
    """
encoder = preprocessing.LabelBinarizer()
encoder.fit(x)
encoder.classes_ = np.array(list(range(10)))
labels = encoder.transform(x)
return labels
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
# DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
# DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
    """
    Return a Tensor for a batch of image input
    : image_shape: Shape of the images
    : return: Tensor for image input.
    """
# Implement Function
return tf.placeholder(tf.float32, shape = [None, *image_shape], name = 'x')
def neural_net_label_input(n_classes):
    """
    Return a Tensor for a batch of label input
    : n_classes: Number of classes
    : return: Tensor for label input.
    """
# Implement Function
return tf.placeholder(tf.float32, shape = [None, n_classes], name = 'y')
def neural_net_keep_prob_input():
    """
    Return a Tensor for keep probability
    : return: Tensor for keep probability.
    """
# Implement Function
return tf.placeholder(tf.float32, name = 'keep_prob')
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
    """
    Apply convolution then max pooling to x_tensor
    :param x_tensor: TensorFlow Tensor
    :param conv_num_outputs: Number of outputs for the convolutional layer
    :param conv_ksize: kernel size 2-D Tuple for the convolutional layer
    :param conv_strides: Stride 2-D Tuple for convolution
    :param pool_ksize: kernel size 2-D Tuple for pool
    :param pool_strides: Stride 2-D Tuple for pool
    : return: A tensor that represents convolution and max pooling of x_tensor
    """
# Implement Function
channels = int(x_tensor.get_shape()[3])
weights = tf.Variable(
tf.truncated_normal([conv_ksize[0], conv_ksize[1], channels, conv_num_outputs], stddev=0.1, dtype=tf.float32))
conv = tf.nn.conv2d(x_tensor, weights, strides=[1, conv_strides[0], conv_strides[1], 1], padding='SAME')
bias = tf.Variable(tf.zeros([conv_num_outputs], dtype=tf.float32))
relu = tf.nn.relu(tf.nn.bias_add(conv, bias))
max_pool = tf.nn.max_pool(relu, ksize=[1, pool_ksize[0], pool_ksize[1], 1],
strides=[1, pool_strides[0], pool_strides[1], 1], padding='SAME')
return max_pool
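# Example shape flow through conv2d_maxpool (a worked example, assuming a
# 32x32 input, conv stride 1 and pool stride 2, both with SAME padding):
#   conv + relu -> 32 x 32 x conv_num_outputs
#   max pool    -> 16 x 16 x conv_num_outputs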
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
    """
    Flatten x_tensor to (Batch Size, Flattened Image Size)
    : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
    : return: A tensor of size (Batch Size, Flattened Image Size).
    """
# Implement Function
tensor_shape = x_tensor.get_shape().as_list()
final_shape = tensor_shape[1] * tensor_shape[2] * tensor_shape[3]
flat_output = tf.reshape(x_tensor, [-1, final_shape])
#flat_output = tf.contrib.layers.flatten(inputs=x_tensor)
return flat_output
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
    """
    Apply a fully connected layer to x_tensor using weight and bias
    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of outputs that the new tensor should have.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
# Implement Function
weights_size = x_tensor.get_shape().as_list()[1]
weights = tf.Variable(tf.truncated_normal([weights_size, num_outputs], stddev=0.1, dtype=tf.float32))
bias = tf.Variable(tf.zeros([num_outputs],dtype=tf.float32))
fully_connected = tf.nn.relu(tf.add(tf.matmul(x_tensor, weights), bias))
#fully_connected = tf.contrib.layers.fully_connected(inputs=x_tensor, num_outputs=num_outputs, activation_fn=tf.nn.relu)
return fully_connected
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
    """
    Apply an output layer to x_tensor using weight and bias
    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of outputs that the new tensor should have.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
weights_size = x_tensor.get_shape().as_list()[1]
weights = tf.Variable(tf.truncated_normal([weights_size, num_outputs], stddev=0.1, dtype=tf.float32))
bias = tf.Variable(tf.zeros([num_outputs],dtype=tf.float32))
fully_connected = tf.add(tf.matmul(x_tensor, weights), bias)
#fully_connected = tf.layers.dense(inputs=x_tensor, units=num_outputs)
return fully_connected
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
    """
    Create a convolutional neural network model
    : x: Placeholder tensor that holds image data.
    : keep_prob: Placeholder tensor that holds dropout keep probability.
    : return: Tensor that represents logits
    """
# Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
x_tensor = x
conv_num_output = 16
fully_num_output = 256
conv_ksize = (3, 3)
conv_strides = (1, 1)
pool_ksize = (3, 3)
pool_strides = (2, 2)
num_outputs = 10
conv_layer1 = conv2d_maxpool(x_tensor, conv_num_output, conv_ksize, conv_strides, pool_ksize, pool_strides)
conv_layer2 = conv2d_maxpool(conv_layer1, conv_num_output*4, conv_ksize, conv_strides, pool_ksize, pool_strides)
# Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
flattened = flatten(conv_layer2)
# Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
fully_connected_layer = fully_conn(flattened, fully_num_output)
fully_connected_layer = tf.nn.dropout(fully_connected_layer, keep_prob=keep_prob)
fully_connected_layer = fully_conn(fully_connected_layer/2, fully_num_output)
fully_connected_layer = tf.nn.dropout(fully_connected_layer/2, keep_prob=keep_prob)
fully_connected_layer = fully_conn(fully_connected_layer/4, fully_num_output)
# Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
output_layer = output(fully_connected_layer, num_outputs)
# return output
return output_layer
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that it can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
    """
    Optimize the session on a batch of images and labels
    : session: Current TensorFlow session
    : optimizer: TensorFlow optimizer function
    : keep_probability: keep probability
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    """
# Implement Function
session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
    """
    Print information about loss and validation accuracy
    : session: Current TensorFlow session
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : cost: TensorFlow cost function
    : accuracy: TensorFlow accuracy function
    """
# Calculate batch loss and accuracy
keep_probability = 1.0
validation_accuracy = session.run(accuracy, feed_dict={x: valid_features, y: valid_labels, keep_prob: keep_probability})
loss = session.run(cost, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})
print('Loss = {} - Validation Accuracy = {:3.2f}%'.format(loss, (validation_accuracy*100)))
# TODO: Tune Parameters
epochs = 20
batch_size = 256
keep_probability = 0.5
# DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
# DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
    """Test the saved model against the test dataset"""
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
6,245
|
<ASSISTANT_TASK:>
Python Code:
import csv # module used for reading and converting .CSV files
import os # module that enables local operating system dependent commands
filepath = 'C:/Users/Radley/Downloads/' # store file location as a string
filename = 'lbl.csv' # store the file name as a string
def printlocalCSV(filepath, filename):
file = os.path.join(filepath,filename) # join filepath/name into one string
csvfile = open(file) # open the file with Python
reader = csv.reader(csvfile) # read the CSV with csv.reader
for row in reader: # for every row in the CSV...
print(', '.join(row))
# Display/print the CSV with a comma and space separating each element
csvfile.close() # close the file
def importlocalCSV(filepath, filename):
datetime = []
cpm = []
    # Set up empty lists so you can place the data into them.
line = 0
# We want to ignore the 1st row of headers, so we set a variable to
# count the row we are iterating on.
file = os.path.join(filepath,filename)
with open(file) as csvfile:
# It is good practice to use the with statement when dealing with files
# because it automatically closes the file after the nested lines
# This with command performs same function as csvfile = open(file)
reader = csv.reader(csvfile)
for row in reader:
if line != 0:
# Conditional statement to ignore first row of headers
datetime.append(row[0])
# Append means ATTACH. We are attaching data in the 1st
# column to our list, row by row
cpm.append(float(row[6]))
# The command float records each element as a numerical
# value rather than a character string.
line = line + 1
# Progress iteration forward by adding +1 to line
return datetime, cpm
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Normally, your file of interest is not located in the default working directory of Python. os.chdir() changes the working directory so we can open our file of interest.
Step2: The following module will read the CSV with csv.reader and display the data in your console one row at a time. This display command inserts a comma and space between each data entry, effectively reprinting the .CSV in your Python console.
Step3: The following module is an example of reading a CSV and importing the data as variables in Python. In many circumstances, you will want to import the data so that it can be used in Python. In addition, I use csv.reader with different, more efficient syntax; a DictReader variant is sketched below.
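A DictReader variant of the same import, as a hedged sketch: csv.DictReader maps each row to the header names, so no line counter is needed (the column names 'datetime' and 'cpm' are hypothetical, since the file's header row is not shown):
def importlocalCSVdict(filepath, filename):
    # DictReader consumes the header row itself, so no line counter is needed
    datetime, cpm = [], []
    with open(os.path.join(filepath, filename)) as csvfile:
        for row in csv.DictReader(csvfile):
            datetime.append(row['datetime'])  # hypothetical header name
            cpm.append(float(row['cpm']))     # hypothetical header name
    return datetime, cpm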
|
6,246
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
import httplib2 # pip install httplib2
import json # already installed; otherwise: pip install json
import apiclient.discovery # pip install google-api-python-client
import bs4 # already installed; otherwise: pip install bs4
import nltk # pip install nltk --> on Windows, go to http://www.lfd.uci.edu/~gohlke/pythonlibs/
nltk.__version__
# replace with YOUR key
import os
try:
from pyquickhelper.loghelper import get_password
API_KEY = get_password("gapi", "ensae_teaching_cs,key")
except Exception as e:
print(e)
if False: # to be replaced with another API
    # Enter the name of a person who has a public Google Plus account
    Q = "Tim O'Reilly"
    # Connect to the API (OAuth2 method)
    service = apiclient.discovery.build('plus', 'v1', http=httplib2.Http(),
                                        developerKey=API_KEY)
    # Fetch the feeds
    people_feed = service.people().search(query=Q).execute()
    # Print the retrieved JSON
res = json.dumps(people_feed['items'], indent=1)
print(res if len(res) < 1000 else res[:1000] + "...")
if False: # to be replaced with another API
    # Since we are working in a Notebook, the corresponding images can easily be displayed:
    # the unique Google Plus avatar identifier and the name
from IPython.core.display import HTML
html = []
for p in people_feed['items']:
html += ['<p><img src="{}" /> {}: {}</p>'.format(p['image']['url'], p['id'], p['displayName'])]
HTML(''.join(html[:5]))
if False: # to be replaced with another API
USER_ID = '107033731246200681024'
activity_feed = service.activities().list(
userId=USER_ID,
collection='public',
maxResults='100' # Max allowed per API
).execute()
res = json.dumps(activity_feed, indent=1)
print(res if len(res) < 1000 else res[:1000] + "...")
import json
with open("ressources_googleplus/107033731246200681024.json", "r", encoding="utf-8") as f:
activity_feed = json.load(f)
res = json.dumps(activity_feed, indent=1)
print(res if len(res) < 1000 else res[:1000] + "...")
from bs4 import BeautifulSoup
def cleanHtml(html):
if html == "":
return ""
return BeautifulSoup(html, 'html.parser').get_text()
try:
print(activity_feed[0]['object']['content'])
print("\n")
print(cleanHtml(activity_feed[0]['object']['content']))
except Exception as e:
print(e)
if False: # to be replaced with another API
    import json
    import apiclient.discovery
    MAX_RESULTS = 200 # the API caps each request at 100 results => we iterate in a loop to get 200
activity_feed = service.activities().list(
userId=USER_ID,
collection='public',
maxResults='100'
)
activity_results = []
while activity_feed != None and len(activity_results) < MAX_RESULTS:
activities = activity_feed.execute()
if 'items' in activities:
for activity in activities['items']:
if activity['object']['objectType'] == 'note' and activity['object']['content'] != '':
activity['title'] = cleanHtml(activity['title'])
activity['object']['content'] = cleanHtml(activity['object']['content'])
activity_results += [activity]
        # list_next moves on to the next request
activity_feed = service.activities().list_next(activity_feed, activities)
    # write the result to a json file
    import os
    if not os.path.exists("ressources_googleplus"):
        os.mkdir("ressources_googleplus")
    f = open('./ressources_googleplus/' + USER_ID + '.json', 'w')
    f.write(json.dumps(activity_results, indent=1))
    f.close()
    print(str(len(activity_results)), "activities written to", f.name)
import json
with open("ressources_googleplus/107033731246200681024.json", "r", encoding="utf-8") as f:
activity_results = json.load(f)
corpus = {
'a' : "Mr. Green killed Colonel Mustard in the study with the candlestick. \
Mr. Green is not a very nice fellow.",
'b' : "Professor Plum has a green plant in his study.",
'c' : "Miss Scarlett watered Professor Plum's green plant while he was away \
from his office last week."
}
terms = {
'a' : [ i.lower() for i in corpus['a'].split() ],
'b' : [ i.lower() for i in corpus['b'].split() ],
'c' : [ i.lower() for i in corpus['c'].split() ]
}
from math import log
QUERY_TERMS = ['mr.', 'green']
def tf(term, doc, normalize=True):
doc = doc.lower().split()
if normalize:
return doc.count(term.lower()) / float(len(doc))
else:
return doc.count(term.lower()) / 1.0
def idf(term, corpus):
num_texts_with_term = len([True for text in corpus if term.lower() \
in text.lower().split()])
try:
return 1.0 + log(float(len(corpus)) / num_texts_with_term)
except ZeroDivisionError:
return 1.0
def tf_idf(term, doc, corpus):
return tf(term, doc) * idf(term, corpus)
for (k, v) in sorted(corpus.items()):
print(k, ':', v)
print('\n')
query_scores = {'a': 0, 'b': 0, 'c': 0}
for term in [t.lower() for t in QUERY_TERMS]:
for doc in sorted(corpus):
print('TF({}): {}'.format(doc, term), tf(term, corpus[doc]))
print('IDF: {}'.format(term, ), idf(term, corpus.values()))
print('\n')
for doc in sorted(corpus):
score = tf_idf(term, corpus[doc], corpus.values())
print('TF-IDF({}): {}'.format(doc, term), score)
query_scores[doc] += score
print('\n')
print("Score TF-IDF total pour le terme '{}'".format(' '.join(QUERY_TERMS), ))
for (doc, score) in sorted(query_scores.items()):
print(doc, score)
import nltk
# nltk gives access to methods, but also to data sets, which must be downloaded with the .download() command
nltk.download('stopwords')
from pprint import pprint
len(activity_results)
if len(activity_results) > 0:
pprint(activity_results[0])
if len(activity_results) > 0:
pprint(activity_results[0]['object']['content'])
all_content = " ".join([ a['object']['content'] for a in activity_results ])
print("Nombre de caractères : ",len(all_content))
print('\n')
# Naive tokenisation on the spaces between words => we get a list of words
tokens = all_content.split()
# We turn this list into an nltk "Text" object (a string-like object that keeps the notion of
# tokens and provides a number of useful methods for exploring the data).
text = nltk.Text(tokens)
# For example "concordance": shows the occurrences of a word in its context
print("Example occurrences of the term 'open':")
text.concordance("open")
print('\n')
# Frequency analysis of the terms of interest
fdist = text.vocab()
print("Frequent co-occurrences:")
colloc = text.collocation_list()
print(colloc)
print('\n')
print("Number of words:", len(tokens))
print('\n')
print("Number of unique words:", len(fdist.keys()))
print('\n')
print("Number of unique words v2:", len(set(tokens)))
print("Number of occurrences of the term 'open':", fdist["open"])
print("Number of occurrences of the term 'source':", fdist["source"])
print("Number of occurrences of the term 'web':", fdist["web"])
print("Number of occurrences of the term 'API':", fdist["API"])
print('\n')
# the 100 most frequent tokens
top100_items = sorted(fdist.items(), key=lambda x: x[1], reverse=True)[:100]
# without the frequencies
top100 = [t[0] for t in top100_items]
print("Top 100:", top100)
print('\n')
# without the overly frequent terms ("stopwords")
top100_without_stopwords = [w for w in top100 if w.lower() \
                            not in nltk.corpus.stopwords.words('english')]
print("Top 100 without the frequent words:", top100_without_stopwords)
print('\n')
long_words_not_urls = [w for w in fdist.keys() if len(w) > 15 and not w.startswith("http")]
print("Long words, excluding urls:", long_words_not_urls)
print('\n')
# Number of urls
print("Number of urls:", len([w for w in fdist.keys() if w.startswith("http")]))
print('\n')
# Enumerate the frequency distribution
for rank, word in enumerate(sorted(fdist.items(),key=lambda x: x[1],reverse=True)):
print(rank, word)
if rank > 75:
print("....")
break
fdist = text.vocab()
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1, figsize=(16,4))
fdist.plot(100,cumulative=True);
import json
import nltk
path = 'ressources_googleplus/107033731246200681024.json'
text_data = json.loads(open(path).read())
QUERY_TERMS = ['open','data']
activities = [activity['object']['content'].lower().split() \
for activity in text_data \
if activity['object']['content'] != ""]
# The TextCollection package contains a tf-idf module
tc = nltk.TextCollection(activities)
relevant_activities = []
for idx in range(len(activities)):
score = 0
for term in [t.lower() for t in QUERY_TERMS]:
score += tc.tf_idf(term, activities[idx])
if score > 0:
relevant_activities.append({'score': score, 'title': text_data[idx]['title'],
'url': text_data[idx]['url']})
# Sort by score and present the results
relevant_activities = sorted(relevant_activities,
key=lambda p: p['score'], reverse=True)
c=0
for activity in relevant_activities:
if c < 6:
print(activity['title'])
print('\tLink: {}'.format(activity['url']))
print('\tScore: {}'.format(activity['score']))
c+=1
import json
import nltk
path = 'ressources_googleplus/107033731246200681024.json'
data = json.loads(open(path).read())
# Select the texts that have more than 1000 characters
data = [ post for post in json.loads(open(path).read()) \
if len(post['object']['content']) > 1000 ]
all_posts = [post['object']['content'].lower().split()
for post in data ]
tc = nltk.TextCollection(all_posts)
# Build a (search term x document) matrix:
# returns a tf-idf score for each term in each document
td_matrix = {}
for idx in range(len(all_posts)):
post = all_posts[idx]
fdist = nltk.FreqDist(post)
doc_title = data[idx]['title']
url = data[idx]['url']
td_matrix[(doc_title, url)] = {}
for term in fdist.keys():
td_matrix[(doc_title, url)][term] = tc.tf_idf(term, post)
distances = {}
for (title1, url1) in td_matrix.keys():
distances[(title1, url1)] = {}
(min_dist, most_similar) = (1.0, ('', ''))
for (title2, url2) in td_matrix.keys():
        # copy the values (a dictionary is mutable)
terms1 = td_matrix[(title1, url1)].copy()
terms2 = td_matrix[(title2, url2)].copy()
        # fill in the gaps so both vectors have the same length
for term1 in terms1:
if term1 not in terms2:
terms2[term1] = 0
for term2 in terms2:
if term2 not in terms1:
terms1[term2] = 0
        # build score vectors over all the terms of each document
v1 = [score for (term, score) in sorted(terms1.items())]
v2 = [score for (term, score) in sorted(terms2.items())]
        # compute the similarity between documents: cosine distance between the two tf-idf score vectors
distances[(title1, url1)][(title2, url2)] = \
nltk.cluster.util.cosine_distance(v1, v2)
import pandas as p
df_dist=p.DataFrame(distances)
df_dist.iloc[:5,:5]
import nltk
sentence = "Mr. Green killed Colonel Mustard in the study with the " + \
"candlestick. Mr. Green is not a very nice fellow."
print(list(nltk.ngrams(sentence.split(), 2)))
txt = nltk.Text(sentence.split())
txt.collocation_list()
import json
import nltk
path = 'ressources_googleplus/107033731246200681024.json'
data = json.loads(open(path).read())
# Number of co-occurrences to find
N = 25
all_tokens = [token for activity in data for token in \
activity['object']['content'].lower().split()]
finder = nltk.BigramCollocationFinder.from_words(all_tokens)
finder.apply_freq_filter(2)
# filter out overly frequent words
finder.apply_word_filter(lambda w: w in nltk.corpus.stopwords.words('english'))
bigram_measures = nltk.collocations.BigramAssocMeasures()
collocations = finder.nbest(bigram_measures.jaccard, N)
for collocation in collocations:
c = ' '.join(collocation)
print(c)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Automatic language processing (or Natural Language Processing) offers a set of methods that make it possible (among other things)
Step2: Retrieving the Google+ API key
Step3: Connecting to the API, and querying a person who has a Google+ account (with public activities)
Step4: [
Step5: Query on the activity of the selected person
Step6: {
Step7: Cleaning the text data with BS4
Step8: Fetching and storing the data
Step9: Analysis of the text data - TF-IDF, cosine similarity and n-grams
Step10: Example on a small corpus of 3 documents
Step11: The tf-idf score for the term "Mr. Green" is highest for document a (a worked check follows after this list).
Step12: Exercise 2
Step13: Exercise 3
Step14: Exercise 4
Step15: Analysis of the co-occurrences in the text data retrieved from Google+
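As a worked check of the tf and idf formulas implemented above: for the term 'green' in document b (9 tokens, one occurrence), tf = 1/9 ≈ 0.111; 'green' appears in all three documents, so idf = 1 + log(3/3) = 1.0, and tf-idf = tf × idf ≈ 0.111, matching the script's output for document b.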
|
6,247
|
<ASSISTANT_TASK:>
Python Code:
2 * 4 - (7 - 1) / 3 + 1.0
1 / 0
1.0 / 0.0
3 / 2
3 // 2
2 ** 16
2 + 3j
1j
# Absolute value
abs(2 + 3j)
abs(_13)
int(18.6)
round(18.6)
float(1)
complex(2)
str(256568)
a = 2.
type(a)
isinstance(a, float)
print('hello world')
max(1,5,8,7)
min(-1,1,0)
a = 1 + 2j
b = 3.14159
b
x, y = 1, 2
x, y
x, y = y, x
x, y
x == y
print(x != y)
print(x < y)
print(x <= y)
print(x > y)
print(x >= y)
# comparisons can even be chained:
x = 5.
6. < x < 8.
1 + 1j < 0 + 1j
# Strings, however, do have an ordering
'aaab' > 'ba'
True and False
not False
True or False
# A curiosity:
(True + True) * 10
una_lista = [1, 2, 3.0, 4 + 0j, "5"]
una_tupla = (1, 2, 3.0, 4 + 0j, "5")
print(una_lista)
print(una_tupla)
print(una_lista == una_tupla)
tupla_sin_parentesis = 2,5,6,9,7
type(tupla_sin_parentesis)
2 in una_lista
2 in una_tupla
len(una_lista)
print(una_lista[0]) # First element, 1
print(una_tupla[1]) # Second element, 2
print(una_lista[0:2]) # From the first up to the third, excluding it: 1, 2
print(una_tupla[:3]) # From the first up to the fourth, excluding it: 1, 2, 3.0
print(una_lista[-1]) # The last one: "5"
print(una_tupla[:]) # From the first to the last
print(una_lista[::2]) # From the first to the last, in steps of 2: 1, 3.0, "5"
mis_asignaturas = [
['Álgebra', 'Cálculo', 'Física'],
['Mecánica', 'Termodinámica'],
['Sólidos', 'Electrónica']
]
mis_asignaturas
print(x,y)
if x > y:
    print("x is greater than y")
    print("x is still greater than y")
if 1 < 0:
    print("1 is less than 0")
    print("1 is still less than 0") # <-- Wrong!
if 1 < 0:
    print("1 is less than 0")
print("1 is still less than 0")
print(x,y)
if x > y:
    print("x is greater than y")
else:
    print("x is less than y")
print(x, y)
if x < y:
    print("x is less than y")
elif x == y:
    print("x is equal to y")
else:
    print("x is neither less than nor equal to y")
ii = -2
while ii < 5:
print(ii)
ii += 1
ii = 0
while ii < 5:
print(ii)
ii += 1
if ii == 3:
break
ii = 0
while ii < 5:
print(ii)
ii += 1
if ii == 3:
break
else:
    print("The loop has finished")
ii = 0
while ii < 5:
print(ii)
ii += 1
#if ii == 3:
#break
else:
    print("The loop has finished")
for ii in (1,2,3,4,5):
print(ii)
for nombre in "Juan", "Luis", "Carlos":
print(nombre)
for ii in range(3):
print(ii)
for jj in range(2, 5):
print(jj)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Division by zero raises an error
Step2: <div class="alert alert-info">Later we will see how to handle these errors. On the other hand, when we use NumPy this operation will return `NaN`.</div>
Step3: Integer division can be forced with the // operator
Step4: A number can be raised to a power with the ** operator
Step5: Another type that will prove very useful is the complex numbers
Step6: <div class="alert alert-info"><strong>IPython tip</strong>
Step7: We can convert variables to int, float, complex, str...
Step8: We can check the type of a variable
Step9: Other useful functions are
Step10: You have just used functions! As you can see, it is a fairly standard way
Step11: In Python, assignment does not print the result on screen, unlike what happens in MATLAB and Octave (unless a semicolon is added at the end). The best way to display the variable we have just assigned is this
Step12: In a cell we can write code that spans several lines. If the last of them returns a result, it will be printed.
Step13: <div class="alert alert-info">We can perform **multiple assignment**, as done in the previous cell with the variables `x` and `y`, to swap values intuitively
Step14: The comparison operators are
Step15: If the ordering makes no sense, an error is returned
Step16: Booleans
Step17: Other data types
Step18: For tuples, we can even omit the parentheses
Step19: With both types we can
Step20: Find out how many elements they hold with the len function
Step21: We can index the sequences, using the syntax [<start>
Step22: We will see more about indexing in NumPy, so don't worry for now. Just remember one thing
Step23: This will be of great help later on for building arrays.
Step24: If we want to add extra branches to the conditional, we can use the elif statement (short for else if). For the final part, which must run if none of the previous conditions was met, we use the else statement
Step25: Control structures (II)
Step26: <div class="alert alert-info"><strong>Tip</strong>
Step27: An else block right after the loop runs if the loop was not interrupted by us (a compact sketch of this pattern follows after this list)
Step28: for
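A compact sketch of the for/else pattern from the last steps (the searched-for name is deliberately absent from the tuple):
for nombre in ("Juan", "Luis", "Carlos"):
    if nombre == "Ana":  # hypothetical target, not in the tuple
        print("Found it!")
        break
else:
    # runs only because the loop was never interrupted by a break
    print("The loop finished without finding the name")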
|
6,248
|
<ASSISTANT_TASK:>
Python Code:
# execute this cell
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from astropy.io import fits

np.random.seed(0)
x = np.concatenate([stats.cauchy(-5, 1.8).rvs(500),
stats.cauchy(-4, 0.8).rvs(2000),
stats.cauchy(-1, 0.3).rvs(500),
stats.cauchy(2, 0.8).rvs(1000),
stats.cauchy(4, 1.5).rvs(500)])
# truncate values to a reasonable range
x = x[(x > -15) & (x < 15)]
# complete
# plt.hist(
# complete
# plt.hist
# execute this cell
from sklearn.neighbors import KernelDensity
def kde_sklearn(data, grid, bandwidth = 1.0, **kwargs):
kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs)
kde_skl.fit(data[:, np.newaxis])
log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density)
return np.exp(log_pdf)
# complete
# plt.hist(
# grid =
# PDF =
# plt.plot(
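# One possible completion of the scaffold above (a sketch; the grid range and
# bandwidth below are assumptions, not part of the original exercise):
#   grid = np.linspace(-15, 15, 1000)
#   PDF = kde_sklearn(x, grid, bandwidth=0.5)
#   plt.plot(grid, PDF)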
# execute this cell
def bayesian_blocks(t):
    """Bayesian Blocks Implementation
By Jake Vanderplas. License: BSD
Based on algorithm outlined in http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
Parameters
----------
t : ndarray, length N
data to be histogrammed
Returns
-------
bins : ndarray
array containing the (N+1) bin edges
Notes
-----
This is an incomplete implementation: it may fail for some
datasets. Alternate fitness functions and prior forms can
    be found in the paper listed above.
    """
# copy and sort the array
t = np.sort(t)
N = t.size
# create length-(N + 1) array of cell edges
edges = np.concatenate([t[:1],
0.5 * (t[1:] + t[:-1]),
t[-1:]])
block_length = t[-1] - edges
# arrays needed for the iteration
nn_vec = np.ones(N)
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
#-----------------------------------------------------------------
# Start with first data cell; add one cell at each iteration
#-----------------------------------------------------------------
for K in range(N):
# Compute the width and count of the final bin for all possible
# locations of the K^th changepoint
width = block_length[:K + 1] - block_length[K + 1]
count_vec = np.cumsum(nn_vec[:K + 1][::-1])[::-1]
# evaluate fitness function for these possibilities
fit_vec = count_vec * (np.log(count_vec) - np.log(width))
fit_vec -= 4 # 4 comes from the prior on the number of changepoints
fit_vec[1:] += best[:K]
# find the max of the fitness: this is the K^th changepoint
i_max = np.argmax(fit_vec)
last[K] = i_max
best[K] = fit_vec[i_max]
#-----------------------------------------------------------------
# Recover changepoints by iteratively peeling off the last block
#-----------------------------------------------------------------
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
return edges[change_points]
# complete
# plt.hist(
# complete
# complete
# complete
from astropy.stats import bayesian_blocks as bb
# complete
# complete
# complete
# execute this cell
kplr_hdul = fits.open('./data/kplr009726699-2011271113734_llc.fits')
# execute this cell
kplr_hdul.info()
# execute this cell
lcdata = kplr_hdul[1].data
lcdata.columns
# execute this cell
t = lcdata['TIME']
f = lcdata['PDCSAP_FLUX']
e = lcdata['PDCSAP_FLUX_ERR']
t = t[~np.isnan(f)]
e = e[~np.isnan(f)]
f = f[~np.isnan(f)]
nf = f / np.median(f)
ne = e / np.median(f)
# complete
# complete
# edges =
#
#
#
# plt.step(
# complete
# your_data =
# complete
# def stratified_bayesian_blocks(
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hey, nice histogram!
Step2: Problem 1b
Step4: Problem 1d
Step5: Problem 2a
Step6: Problem 2b
Step7: Problem 3c
Step8: Problem 3d
Step9: Problem 3f
Step10: Problem 3g
Step11: Putting these results in context
Step12: Problem 4b
Step13: Problem 4c
Step14: Cool, we have loaded in the FITS file. Let's look at what's in it
Step15: We want the light curve, so let's check out what's in that part of the file!
Step16: Problem 5b
Step17: Problem 5c
Step18: Concluding Remarks
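As a recap of the call pattern used above, histogramming the Kepler time stamps with Bayesian-blocks edges can be sketched as follows (bb is the astropy import from the code; the p0 false-alarm probability is an assumption, not a value from the exercise):
edges = bb(t, fitness='events', p0=0.01)
plt.hist(t, bins=edges, histtype='step', density=True)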
|
6,249
|
<ASSISTANT_TASK:>
Python Code:
import urllib
import json
import time
import pandas as pd
import datetime
from arctic import Arctic
import arctic
import subprocess
import platform
import os
import krakenex
if platform.system() == "Darwin":
os.chdir('/users/'+os.getlogin()+'/MEGA/App')
if platform.system() == "Darwin":
subprocess.Popen(['/usr/local/bin/mongod', '--dbpath', '/users/'+os.getlogin()+'/MEGA/App/cryptodb', '--logpath', '/users/'+os.getlogin()+'/MEGA/App/cryptodb/krakendb.log', '--fork'])
k = krakenex.API()
k.load_key('kraken.key')
# Connect to Local MONGODB
krakendb = Arctic('localhost')
# Create the library - defaults to VersionStore
krakendb.initialize_library('Kraken')
# Access the library
kraken = krakendb['Kraken']
def updateTickData(pairs, db):
s = '\n Begin import of '+', '.join(pairs[0:len(pairs)-1])+' and '+pairs[-1]+'\n This could take some time! \n'
print(s)
tickdata_collection = {}
for pair in pairs:
print(pair+': ')
tickdata_collection[pair]=get_all_kraken_trades(pair, db = kraken)
    s = '\nAll pairs are up to date now!\n'
print(s)
return tickdata_collection
def getInfo(db):
infolist = kraken.list_versions()
s = '\n Last updates: \n'
print(s)
for list in infolist:
s = list['symbol']+' updated at ' + list['date'].strftime('%Y-%m-%d %H:%M:%S')+ ', Version: '+ str(list['version'])+'\n'
print(s)
snapshots = kraken.list_snapshots()
s = '\n Last snapshots: \n'
print(s)
for list in snapshots:
s = list+'\n'
print(s)
def get_all_kraken_trades(pair, since = None, db = None):
    """
    Input:
        pair = pair name
        since = unix datestamp, default is None (imports every trade from the beginning, this could take a long time)
    Output:
        Pandas DataFrame
    """
history = pd.DataFrame( columns = ['price', 'volume', 'time', 'buy/sell', 'market/limit'])
if pair in db.list_symbols():
since = db.read(pair).metadata['last']
elif since == None:
since = 0
try:
while True:
data = urllib.request.urlopen("https://api.kraken.com/0/public/Trades?pair="+pair+"&since="+str(since)).read()
data = data.decode()
data = json.loads(data)
last = int(data['result']['last'])
data = data['result'][pair]
data = pd.DataFrame(data)
if data.empty:
break
dates = [datetime.datetime.fromtimestamp(ts) for ts in (data[2].values)]
data.index = pd.DatetimeIndex(dates)
data = data.iloc[:,0:5]
data.iloc[:,0:3] = data.iloc[:,0:3].astype(float)
data.columns = ['price', 'volume', 'time', 'buy/sell', 'market/limit']
            history = pd.concat([history, data])  # DataFrame.append was removed in pandas 2.0
since = last
print('imported data until: '+history.index[-1].strftime('%Y-%m-%d %H:%M:%S'))
time.sleep(3)
except Exception as e:
print(str(e))
db.append(pair, history, metadata={'last': last, 'source': 'Kraken'})
time.sleep(2)
alltrades = db.read(pair).data
return alltrades
def get_kraken_balance(db = None):
balance = k.query_private('Balance')['result']
df = pd.DataFrame(list(balance.items()))
df = df.transpose()
df.columns = df.iloc[0]
df = df.reindex(df.index.drop(0))
df = df.astype(float)
last = datetime.datetime.now()
df.index = pd.DatetimeIndex([last])
if db:
db.append('Balance', df, metadata={'last': last, 'source': 'Kraken'})
allbalance = db.read('Balance').data
return df, allbalance
else:
return df
def add_kraken_order(pair, buysell, ordertype, volume, **kwargs ):
'''
Input:
pair = asset pair
buysell = type of order (buy/sell)
ordertype = order type:
market
limit (price = limit price)
stop-loss (price = stop loss price)
take-profit (price = take profit price)
stop-loss-profit (price = stop loss price, price2 = take profit price)
stop-loss-profit-limit (price = stop loss price, price2 = take profit price)
stop-loss-limit (price = stop loss trigger price, price2 = triggered limit price)
take-profit-limit (price = take profit trigger price, price2 = triggered limit price)
trailing-stop (price = trailing stop offset)
trailing-stop-limit (price = trailing stop offset, price2 = triggered limit offset)
stop-loss-and-limit (price = stop loss price, price2 = limit price)
settle-position
price = price (optional. dependent upon ordertype)
price2 = secondary price (optional. dependent upon ordertype)
volume = order volume in lots
leverage = amount of leverage desired (optional. default = none)
oflags = comma delimited list of order flags (optional):
viqc = volume in quote currency (not available for leveraged orders)
fcib = prefer fee in base currency
fciq = prefer fee in quote currency
nompp = no market price protection
post = post only order (available when ordertype = limit)
starttm = scheduled start time (optional):
0 = now (default)
+<n> = schedule start time <n> seconds from now
<n> = unix timestamp of start time
expiretm = expiration time (optional):
0 = no expiration (default)
+<n> = expire <n> seconds from now
<n> = unix timestamp of expiration time
userref = user reference id. 32-bit signed number. (optional)
validate = validate inputs only. do not submit order (optional)
optional closing order to add to system when order gets filled:
close[ordertype] = order type
close[price] = price
close[price2] = secondary price
Output:
descr = order description info
order = order description
close = conditional close order description (if conditional close set)
txid = array of transaction ids for order (if order was added successfully)
'''
orderinfo = k.query_private('AddOrder', {'pair': pair,'type' : buysell,
'ordertype' : ordertype, 'volume' : volume,
**kwargs })
if bool(orderinfo['error']):
raise Exception(orderinfo['error'])
return orderinfo['result']['txid'], orderinfo['result']['descr']
get_kraken_balance()
getInfo(kraken)
pairs = ['XETHZEUR','XXBTZEUR', 'XZECZEUR', 'XXRPZEUR']
trades = updateTickData(pairs, kraken)
trades['XETHZEUR'].head()
%matplotlib inline
import matplotlib.dates as mdates
import numpy as np
from mpl_finance import candlestick_ohlc
import matplotlib
import matplotlib.pyplot as plt
stop = datetime.datetime.now()
start = stop-datetime.timedelta(days=90)
mask = (trades['XETHZEUR'].index > start) & (trades['XETHZEUR'].index <= datetime.datetime.now())
data = trades['XETHZEUR'].loc[mask]
ohlc = data.price.resample('1D').ohlc().dropna().iloc[1:,:]
volume = data.volume.resample('1D').sum().dropna().iloc[1:]
dateStamp = np.array(ohlc.index).astype('datetime64[s]')
dateStamp = dateStamp.tolist()
df = pd.DataFrame({'Datetime':dateStamp})
df['MPLDate'] = df['Datetime'].apply(lambda date: mdates.date2num(date.to_pydatetime()))
df.index=dateStamp
ohlc.insert(0,'MPLDate', df.MPLDate)
fig=plt.figure(figsize=(17, 8))
# Main Graph
a = plt.subplot2grid((10,8), (0,0), rowspan = 8, colspan = 8)
# Volume
a2 = plt.subplot2grid((10,8), (8,0), sharex = a, rowspan = 2, colspan = 8)
matplotlib.style.use("ggplot")
darkColor = "#183A54"
lightColor = "#00A3E0"
candlestick_ohlc(a, ohlc[['MPLDate', 'open', 'high', 'low', 'close']].astype(float).values, width=0.768, colorup=lightColor, colordown=darkColor)
a.set_ylabel("Price")
a.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(3))
a.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M'))
a2.set_ylabel("Volume")
a2.fill_between(ohlc.MPLDate,0, volume.astype(float), facecolor='#183A54')
plt.setp(a.get_xticklabels(), visible=False)
sma=ohlc['close'].rolling(window=10).mean()
label = str(10)+" SMA"
a.plot(ohlc['MPLDate'],sma, label=label)
a.legend(loc=0)
pairs = ['XXBTZEUR']
import math
import threading
def automatedTradingStrategy():
fastMA = {}
slowMA = {}
mask = {}
tickdata_collection = {}
for pair in pairs:
actbalance, allbalance = get_kraken_balance(db = kraken)
tickdata_collection[pair]=get_all_kraken_trades(pair, db = kraken)
tickdata_collection[pair]=tickdata_collection[pair].price.resample('1H').ohlc().dropna().iloc[-241:-1,:]
fastMA[pair] = tickdata_collection[pair].close.ewm(span=120).mean()
slowMA[pair] = tickdata_collection[pair].close.ewm(span=240).mean()
mask[pair] = fastMA[pair][-1]>slowMA[pair]
if bool((actbalance.ZEUR.values >= 100) & (mask[pair][-1] == True)):
txid, descr = add_kraken_order(pair, 'buy', 'market', math.floor(float(actbalance.ZEUR))/tickdata_collection[pair].close[-1])
print('New Order: ' +str(txid)+ '\nInfo: ')
print(descr)
elif bool((actbalance[pair[0:4]].values > 0) & (mask[pair][-1] == False)):
            txid, descr = add_kraken_order(pair, 'sell', 'market', int(actbalance[pair[0:4]].values) )
print('New Order: ' +str(txid)+ '\nInfo: ')
print(descr)
else:
print('Nothing new this hour!')
global t
t = threading.Timer(3600, automatedTradingStrategy)
t.start()
automatedTradingStrategy()
t.cancel()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The krakenex package (a Python client for the Kraken API) can be installed via pip
Step3: Let's now run the functions to see what happens
Step4: The function get_kraken_balance() returns a pandas DataFrame with the current balance. If you give the function your arctic database as input, the balance will be saved in the database and it also returns a DataFrame with all your past balances (if you've saved them in your database)
Step5: getInfo(db) prints, among other things, the last version of all items in the database with the exact date. It's also possible to snapshot your database. This function will also print the snapshots you've created so far.
Step6: As I already have most of the data in the database, updating it was quite quick. updateTickData(pairs, db) returns a collection of pandas DataFrames containing all trades of the desired pairs. Let's have a look at the first few trades of the pair ETH/EUR. For every trade we have the price, the volume, a unix timestamp, whether it was a buy or a sell, and whether it was a market or a limit order.
Step7: What possibilities do we have, now that we can quickly load all the trades into the Python workspace? For example, we can draw nice plots, or automate our trading strategies via the Kraken API. The two following cells illustrate simple examples of each.
Step8: A nice thing is that we can customize our candlestick charts. We can choose the time frame (here the last 90 days) and with the pandas resample function we can create the desired OHLC data (here daily data). The mpl_finance module then lets us plot nice candlestick charts. It's also possible to plot some financial indicators, e.g. a simple moving average here. In the same way we could add, for example, a MACD indicator. There is a library called TA-Lib which contains more than 200 indicators; it's worth a look. Let's now look at one way to implement an automated trading strategy
Step9: This is a simple strategy just for the pair XBT/EUR, but far more complex strategies over several pairs are also possible. This strategy says "buy/hold" if the exponential moving average over 120 hours (5 days) is above the exponential moving average over 240 hours (10 days), and "sell" if it's below. With the threading module we can execute the function every hour, so that it first updates the latest trade data and then evaluates the relevant conditions. When the conditions are met it buys or sells at market, otherwise it holds the current positions. To stop the function just call t.cancel(). I'm sure there are plenty of ways to implement automated trading; this is just one of them, and I'm not sure if it is a good one (the signal computation is sketched just below).
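A standalone sketch of the same EMA-crossover signal, outside the scheduler (it assumes the trades collection returned by updateTickData earlier; the pair choice is arbitrary):
closes = trades['XXBTZEUR'].price.resample('1H').ohlc().dropna().close
fast = closes.ewm(span=120).mean()
slow = closes.ewm(span=240).mean()
signal = fast > slow  # True -> buy/hold, False -> sell
print(signal.tail())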
|
6,250
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.insert(1, '..')
import crowdastro.data
import io
import astropy.io.votable
import requests
import requests_cache
requests_cache.install_cache(cache_name='gator_cache', backend='sqlite', expire_after=None)
def fetch(subject):
if subject['metadata']['source'].startswith('C'):
# CDFS
catalog = 'chandra_cat_f05'
else:
# ELAIS-S1
catalog = 'elaiss1_cat_f05'
query = {
'catalog': catalog,
'spatial': 'box',
'objstr': '{} {}'.format(*subject['coords']),
'size': '120',
'outfmt': '3',
}
url = 'http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query'
r = requests.get(url, params=query)
votable = astropy.io.votable.parse_single_table(io.BytesIO(r.content), pedantic=False)
ras = votable.array['ra']
decs = votable.array['dec']
names = votable.array['object']
return zip(ras, decs, names)
all_galaxies = {}
for subject in crowdastro.data.get_all_subjects(atlas=True):
galaxies = fetch(subject)
for ra, dec, name in galaxies:
all_galaxies[name] = (ra, dec)
print(len(all_galaxies)) # dicts have unique keys, so there should be no duplicates.
import matplotlib.pyplot
import mclearn.viz
import numpy
%matplotlib inline
ras = numpy.array([g[0] for g in all_galaxies.values()])
decs = numpy.array([g[1] for g in all_galaxies.values()])
[numpy.min(ras), numpy.max(ras), numpy.min(decs), numpy.max(decs)]
matplotlib.pyplot.figure(figsize=(10, 10))
mclearn.viz.plot_hex_map(ras, decs, labels=True, vmax=50)
matplotlib.pyplot.show()
import astropy.convolution
import matplotlib.colors
ras_idx = numpy.linspace(numpy.min(ras), numpy.max(ras), num=400)
dec_idx = numpy.linspace(numpy.min(decs), numpy.max(decs), num=400)
heatmap, _, _ = numpy.histogram2d(ras, decs, bins=[ras_idx, dec_idx])
matplotlib.pyplot.figure(figsize=(10, 10))
matplotlib.pyplot.pcolor(
astropy.convolution.convolve(heatmap, astropy.convolution.Gaussian2DKernel(stddev=2)),
cmap='PuBu_r')
matplotlib.pyplot.axis('scaled')
matplotlib.pyplot.colorbar()
matplotlib.pyplot.xticks(range(0, 400, 50), ras_idx, rotation=75)
matplotlib.pyplot.yticks(range(0, 400, 50), dec_idx)
matplotlib.pyplot.show()
patch_size = 0.8
isolated_galaxies = []
for galaxy, (ra, dec) in all_galaxies.items(): # This is pretty slow! There are definitely better ways to evaluate this.
    # Find a galaxy less than half a patch size away = 0.4 / 60 degrees away.
# If we find one, this galaxy is not isolated.
for other_galaxy, (other_ra, other_dec) in all_galaxies.items():
if other_galaxy == galaxy:
continue
if abs(ra - other_ra) <= 0.5*(patch_size) / 60 and abs(dec - other_dec) <= 0.5*(patch_size) / 60:
break
else:
isolated_galaxies.append(galaxy)
print(len(isolated_galaxies))
print('{:.02%}'.format(len(isolated_galaxies)/len(all_galaxies)))
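# Not part of the original analysis: the same box test runs much faster with a
# k-d tree under the Chebyshev metric (p=inf), where "isolated" again means no
# other galaxy within half a patch size in both RA and dec.
import scipy.spatial
points = numpy.array(list(all_galaxies.values()))
tree = scipy.spatial.cKDTree(points)
dists, _ = tree.query(points, k=2, p=numpy.inf)  # k=2: the nearest hit is the point itself
fast_isolated = dists[:, 1] > 0.5 * patch_size / 60
print(fast_isolated.sum())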
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How many objects are in SWIRE $\cap$ RGZ–ATLAS?
Step2: So there are $56190$ galaxies in SWIRE $\cap$ RGZ–ATLAS. Let's also get that heatmap while we're here.
Step3: A little small to display like this. How about a heatmap of RA against dec?
Step4: How many galaxies are isolated?
|
6,251
|
<ASSISTANT_TASK:>
Python Code:
from collections import defaultdict
import numpy as np
from mock import patch
from grove.simon.simon import Simon, create_valid_2to1_bitmap
mask = '110'
bm = create_valid_2to1_bitmap(mask, random_seed=42)
expected_map = {
'000': '001',
'001': '101',
'010': '000',
'011': '111',
'100': '000',
'101': '111',
'110': '001',
'111': '101'
}
for k, v in bm.items():
assert v == expected_map[k]
reverse_bitmap = defaultdict(list)
for k, v in bm.items():
reverse_bitmap[v].append(k)
expected_reverse_bitmap = {
'001': ['000', '110'],
'101': ['001', '111'],
'000': ['010', '100'],
'111': ['011', '101']
}
for k, v in reverse_bitmap.items():
assert sorted(v) == sorted(expected_reverse_bitmap[k])
with patch("pyquil.api.QuantumComputer") as qc:
# Need to mock multiple returns as an iterable
qc.run.side_effect = [
(np.asarray([0, 1, 1], dtype=int), ),
(np.asarray([1, 1, 1], dtype=int), ),
(np.asarray([1, 1, 1], dtype=int), ),
(np.asarray([1, 0, 0], dtype=int), ),
]
sa = Simon()
found_mask = sa.find_mask(qc, bm)
assert ''.join([str(b) for b in found_mask]) == mask, "Found mask is not expected mask"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simon's algorithm can be used to find the mask $m$ of a 2-to-1 periodic Boolean function defined by the property $f(x) = f(y)$ if and only if $y = x \oplus m$.
Step2: To understand what a 2-to-1 function is, let us invert the map and collect all keys that point to the same value. As the assertion shows, every value has exactly 2 distinct origins.
Step3: To use Simon's algorithm on real quantum hardware we need to define a connection to the QVM or QPU. However, we don't have a real connection in this notebook, so we just mock out the response. If you run this notebook, make sure to replace qc with a pyQuil connection object.
Step4: Now let's run Simon's algorithm. We instantiate the Simon object and then call its find_mask method with the connection object and the 2-to-1 function whose mask we wish to find.
|
6,252
|
<ASSISTANT_TASK:>
Python Code:
all_scores = pd.read_excel("Grades/Book1.xlsx")
all_scores["Is A"] = all_scores["Final Score"] >= 90
# https://courses.cs.vt.edu/~cs1604/grading.html
letter_grade = []
for grade in all_scores["Final Score"]:
if grade >= 90:
letter_grade.append("A")
elif grade >= 80:
letter_grade.append("B")
elif grade >= 70:
letter_grade.append("C")
elif grade >= 60:
letter_grade.append("D")
else:
letter_grade.append("F")
all_scores["Letter Grade"] = letter_grade
all_scores["Total Exams"] = all_scores[["Final Exam", "Midterm 1", "Midterm 2"]].sum(
axis=1
)
all_scores.to_csv('all_scores.csv')
fig, axs = plt.subplots(3, 2, figsize=(13, 10.4))
plt.style.use("default")
sns.ecdfplot(data=all_scores["Total Homeworks"], legend=True, color="red", ax=axs[0, 0])
sns.ecdfplot(data=all_scores["Midterm 1"], legend=True, color="orange", ax=axs[0, 1])
sns.ecdfplot(data=all_scores["Midterm 2"], legend=True, color="blue", ax=axs[1, 0])
sns.ecdfplot(data=all_scores["Final Exam"], legend=True, color="green", ax=axs[1, 1])
sns.ecdfplot(data=all_scores["Total Exams"], legend=True, color="purple", ax=axs[2, 0])
sns.ecdfplot(data=all_scores["Final Score"], legend=True, color="red", ax=axs[2, 1])
# axs[0,1].set_title ('Score out of 100')
axs[2, 0].set_xlabel("Sum of 3 Exams")
fig.savefig("CDF Grades", dpi=400, facecolor="white")
fig, axs = plt.subplots(3, 2, figsize=(10, 9.4))
plt.style.use("default")
sns.histplot(x=all_scores["Total Homeworks"], kde=True, ax=axs[0, 0])
sns.histplot(x=all_scores["Midterm 1"], kde=True, ax=axs[0, 1])
sns.histplot(x=all_scores["Midterm 2"], kde=True, ax=axs[1, 0])
sns.histplot(x=all_scores["Final Exam"], kde=True, ax=axs[1, 1])
sns.histplot(x=all_scores["Total Exams"], kde=True, ax=axs[2, 0])
sns.histplot(x=all_scores["Final Score"], kde=True, ax=axs[2, 1])
axs[2, 0].set_xlabel("Sum of 3 Exams")
fig.savefig("HistPlot Grades", dpi=400, facecolor="white")
clusters_df = pd.read_csv("Clustered_Sessions_FCM.csv")
clusters_df.columns
# Adding time to the sessions clustered
time = pd.read_excel("Sessions_1D_old_names.xlsx")
time.columns
frames_we_want = clusters_df.session_number.unique()
time = time[time['session_number'].isin(frames_we_want)]
time = time.groupby(["session_number"], as_index=False).agg(
timestamp=("timestamp", "min"),
)
clustered_df_with_time = (
pd.merge(
clusters_df, time, left_on="session_number", right_on="session_number", how="inner"
)
)
spring_21_end = datetime(2021,5,14)
clustered_df_with_time = clustered_df_with_time[clustered_df_with_time['timestamp'] <= spring_21_end]
Fall20 = clustered_df_with_time[clustered_df_with_time['timestamp'].dt.year==2020]
len(Fall20), len(Fall20['user_id'].unique()), len(Fall20['curr_frameset_name'].unique())
Spring21 = clustered_df_with_time[clustered_df_with_time['timestamp'].dt.year==2021]
len(Spring21), len(Spring21['user_id'].unique()), len(Spring21['curr_frameset_name'].unique())
x = len(Fall20[Fall20['cluster']=='Credit Seeking']); y = len(Fall20[Fall20['cluster']=='Normal'])
x, y , x+y
x = len(Spring21[Spring21['cluster']=='Credit Seeking']); y= len(Spring21[Spring21['cluster']=='Normal'])
x, y , x+y
weeks5 = timedelta(weeks=5)
fall_20_up_to_mid1 = datetime(2020,9,28) # Exactly 5 weeks after the beginning of the semester
Fall20_mid1 = Fall20[Fall20['timestamp'] <= fall_20_up_to_mid1] # There's a problem here in Fall '20: the log doesn't cover the full period starting from 24 August 2020, so the line below is used instead
Fall20_mid1 = Fall20[Fall20['timestamp'] <= Fall20['timestamp'].min()+weeks5]
spring_21_up_to_mid1 = datetime(2021,3,4) # Exactly 5 weeks after the beginning of the semester
Spring21_mid1 = Spring21[Spring21['timestamp'] <= spring_21_up_to_mid1]
Spring21_mid1 = Spring21[Spring21['timestamp'] <= Spring21['timestamp'].min()+weeks5]
midterms_combined = pd.concat([Fall20_mid1, Spring21_mid1])
x = len(Fall20_mid1[Fall20_mid1['cluster']=='Credit Seeking']); y = len(Fall20_mid1[Fall20_mid1['cluster']=='Normal'])
x,y, x/(y+x), y/(y+x)
x = len(Spring21_mid1[Spring21_mid1['cluster']=='Credit Seeking']); y = len(Spring21_mid1[Spring21_mid1['cluster']=='Normal'])
x,y, x/(y+x), y/(y+x)
all_scores = pd.read_excel("Grades/Book1.xlsx")
all_scores["Is A"] = all_scores["Final Score"] >= 90
letter_grade = []
for grade in all_scores["Final Score"]:
if grade >= 90:
letter_grade.append("A")
elif grade >= 80:
letter_grade.append("B")
elif grade >= 70:
letter_grade.append("C")
elif grade >= 60:
letter_grade.append("D")
else:
letter_grade.append("F")
all_scores["Letter Grade"] = letter_grade
len(all_scores)
all_scores.columns
# Important:
Clustered_Users = midterms_combined.groupby(["user_id", "cluster"], as_index=False).agg(
Cluster_Count=("cluster", "count"),
FramesetName_nunique=("curr_frameset_name", "nunique"),
)
# Cond on Cluster Type --> Drop Cluster Type --> Rename Count to new Name to be able to merge
credit_seeking_student_count = (
Clustered_Users[Clustered_Users["cluster"] == "Credit Seeking"]
.drop(labels=["cluster"], axis=1)
.rename(
columns={
"Cluster_Count": "# CrSk Sessions",
"FramesetName_nunique": "# CrSk Framesets",
}
)
)
normal_student_count = (
Clustered_Users[Clustered_Users["cluster"] == "Normal"]
.drop(labels=["cluster"], axis=1)
.rename(
columns={
"Cluster_Count": "# Nrml Sessions",
"FramesetName_nunique": "# Nrml Framesets",
}
)
)
Clustered_Users = pd.merge(credit_seeking_student_count,normal_student_count,left_on='user_id',right_on='user_id',how='outer')
Clustered_Users = (
pd.merge(
Clustered_Users, all_scores, left_on="user_id", right_on="OpenDSA ID", how="inner"
)
)
Clustered_Users['# CrSk Sessions'] = Clustered_Users['# CrSk Sessions'].fillna(0)
Clustered_Users['# Nrml Sessions'] = Clustered_Users['# Nrml Sessions'].fillna(0)
Clustered_Users['# CrSk Framesets'] = Clustered_Users['# CrSk Framesets'].fillna(0)
Clustered_Users['# Nrml Framesets'] = Clustered_Users['# Nrml Framesets'].fillna(0)
len(Clustered_Users)
Clustered_Users["% of CrSk Sessions"] = Clustered_Users["# CrSk Sessions"] / (
Clustered_Users["# CrSk Sessions"] + Clustered_Users["# Nrml Sessions"]
)
Clustered_Users["% of Nrml Sessions"] = Clustered_Users["# Nrml Sessions"] / (
Clustered_Users["# CrSk Sessions"] + Clustered_Users["# Nrml Sessions"]
)
Clustered_Users["% of CrSk Framesets"] = Clustered_Users["# CrSk Framesets"] / (
Clustered_Users["# CrSk Framesets"] + Clustered_Users["# Nrml Framesets"]
)
Clustered_Users["Is CrSk"] = Clustered_Users["% of CrSk Sessions"] >= 0.5
Clustered_Users["Is CrSk2"] = Clustered_Users["% of CrSk Framesets"] >= 0.5
Clustered_Users.columns
len(Clustered_Users[Clustered_Users["Is CrSk"]==True]), len(Clustered_Users[Clustered_Users["Is CrSk"]==False])
Clustered_Users[Clustered_Users["Is CrSk"]==True]['Midterm 1'].describe()
Clustered_Users[Clustered_Users["Is CrSk"]==False]['Midterm 1'].describe()
fig, ax = plt.subplots(figsize=(20.5, 12.5))
gfg = sns.histplot(data=Fall20, x='timestamp',bins=30,hue='cluster')#,kind="kde")
gfg.set(
    xlabel="timestamp",
    ylabel="# of sessions",
    title="Session Frequency Over Time by Cluster, Fall '20",
)
plt.show()
# The x-axis is time over the semester; the y-axis is the number of sessions in each bin, split by cluster.
# fig.savefig("Fall20 timestamp", facecolor="white")
Y = "Midterm 1"
X ="% of CrSk Sessions"
res = stats.linregress(Clustered_Users[X], Clustered_Users[Y])
rvalue, pvalue = stats.pearsonr(x=Clustered_Users[X], y=Clustered_Users[Y])
print(f"R: {res.rvalue}")
print("P-value", pvalue)
fig, axs = plt.subplots(figsize=(6, 6))
sns.scatterplot(data=Clustered_Users, x=X, y=Y)
gfg = sns.lineplot(x=Clustered_Users[X], y=res.intercept + res.slope * Clustered_Users[X])
gfg.set(
xlabel="Percentage of credit seeking sessions per student",
ylabel="Midterm 1 score",
)
plt.legend(loc='upper right', frameon=True, shadow=True, title='r = -0.330\np-value = 0.00013')
plt.show()
fig.savefig("Midterm1-r-20-21.pdf", facecolor="white",dpi=500)
Y = "Final Score"
X = "% of CrSk Sessions"
res = stats.linregress(Clustered_Users[X], Clustered_Users[Y])
rvalue, pvalue = stats.pearsonr(x=Clustered_Users[X], y=Clustered_Users[Y])
print(f"R: {res.rvalue}")
print("P-value", pvalue)
fig, axs = plt.subplots(figsize=(6, 6))
sns.scatterplot(data=Clustered_Users, x=X, y=Y)
gfg = sns.lineplot(x=Clustered_Users[X], y=res.intercept + res.slope * Clustered_Users[X])
gfg.set(
xlabel="Percentage of credit seeking sessions per student",
ylabel="Final score",
)
plt.legend(loc='upper right', frameon=True, shadow=True, title='r = -0.269\np-value = 0.002')
plt.show()
fig.savefig("Final-Score-r-20-21.pdf", facecolor="white",dpi=500)
print(
"% of CrSk Framesets vs. Total Homeworks: ",
stats.pearsonr(x=Clustered_Users["% of CrSk Framesets"], y=Clustered_Users["Total Homeworks"]),
)
print(
"% of CrSk Framesets vs. Final Exam: ",
stats.pearsonr(x=Clustered_Users["% of CrSk Framesets"], y=Clustered_Users["Final Exam"]),
)
print(
"% of CrSk Framesets vs. Total Exams: ",
stats.pearsonr(x=Clustered_Users["% of CrSk Framesets"], y=Clustered_Users["Total Exams"]),
)
print(
"% of CrSk Framesets vs. Final Score: ",
stats.pearsonr(x=Clustered_Users["% of CrSk Framesets"], y=Clustered_Users["Final Score"]),
)
print(
"% of CrSk Framesets vs. Midterm 1: ",
stats.pearsonr(x=Clustered_Users["% of CrSk Framesets"], y=Clustered_Users["Midterm 1"]),
)
print(
"% of Credit Seeking Vs. Total Homeworks: ",
stats.pearsonr(x=Clustered_Users["% of CrSk Sessions"], y=Clustered_Users["Total Homeworks"]),
)
print(
"% of Credit Seeking Vs. Final Exam: ",
stats.pearsonr(x=Clustered_Users["% of CrSk Sessions"], y=Clustered_Users["Final Exam"]),
)
print(
"% of Credit Seeking Vs. Total Exams: ",
stats.pearsonr(x=Clustered_Users["% of CrSk Sessions"], y=Clustered_Users["Total Exams"]),
)
print(
"% of Credit Seeking Vs. Final Score: ",
stats.pearsonr(x=Clustered_Users["% of CrSk Sessions"], y=Clustered_Users["Final Score"]),
)
sns.displot(data=Clustered_Users, x="Final Score", hue="Is CrSk", multiple="stack")
sns.displot(data=Clustered_Users, x="Total Homeworks", hue="Is CrSk", multiple="stack")
sns.displot(
data=Clustered_Users, x="Final Score", hue="Is CrSk", multiple="stack", kind="kde"
)
sns.displot(
data=Clustered_Users, x="Total Exams", hue="Is CrSk", multiple="stack", kind="kde"
)
sns.displot(
data=Clustered_Users, x="Letter Grade", stat="percent", hue="Is CrSk", multiple="stack"
)
sns.displot(data=Clustered_Users, x="% of CrSk Sessions", hue="Is A", multiple="stack")
temp = Clustered_Users[Clustered_Users["Is CrSk"] == True]["Final Score"]
print("Credit Seeking: ", temp.mean(), temp.std(), len(temp))
print()
temp = Clustered_Users[Clustered_Users["Is CrSk"] == False]["Final Score"]
print("Normal: ", temp.mean(), temp.std(), len(temp))
temp = Clustered_Users[Clustered_Users["Is CrSk"] == True]["Total Homeworks"]
print("Credit Seeking: ", temp.mean(), temp.std(), len(temp))
print()
temp = Clustered_Users[Clustered_Users["Is CrSk"] == False]["Total Homeworks"]
print("Normal: ", temp.mean(), temp.std(), len(temp))
temp = Clustered_Users[Clustered_Users["Is CrSk"] == True]["Total Exams"]
print("Credit Seeking: ", temp.mean(), temp.std(), len(temp))
print()
temp = Clustered_Users[Clustered_Users["Is CrSk"] == False]["Total Exams"]
print("Normal: ", temp.mean(), temp.std(), len(temp))
temp = Clustered_Users[Clustered_Users["Is CrSk"] == True][
["Final Exam", "Midterm 1", "Midterm 2"]
].sum(axis=1)
print("Credit Seeking: ", temp.mean(), temp.std(), len(temp), temp.median())
temp = Clustered_Users[Clustered_Users["Is CrSk"] == False][
["Final Exam", "Midterm 1", "Midterm 2"]
].sum(axis=1)
print("Normal: ", temp.mean(), temp.std(), len(temp), temp.median())
temp = Clustered_Users[Clustered_Users["Is CrSk"] == True]["Total Homeworks"]
temp.mean(), temp.median(), len(temp), temp.std()
temp = Clustered_Users[Clustered_Users["Is CrSk"] == False]["Total Homeworks"]
temp.mean(), temp.median(), len(temp), temp.std()
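# Not in the original notebook: an explicit Welch t-test and Cohen's d for the
# credit-seeking vs. normal comparison on exam totals, reusing the scipy `stats`
# module already used for linregress/pearsonr above.
crsk = Clustered_Users[Clustered_Users["Is CrSk"] == True]["Total Exams"]
nrml = Clustered_Users[Clustered_Users["Is CrSk"] == False]["Total Exams"]
t_stat, p_val = stats.ttest_ind(crsk, nrml, equal_var=False)
pooled_sd = ((crsk.std() ** 2 + nrml.std() ** 2) / 2) ** 0.5
cohens_d = (crsk.mean() - nrml.mean()) / pooled_sd
print("t = {:.3f}, p = {:.4f}, d = {:.3f}".format(t_stat, p_val, cohens_d))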
Clustered_Users.to_csv("Clustered_Users.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing Clusters "Clustered_Sessions_FCM.csv"
Step2: Clustered_Users
Step3: Graphs
Step4: Correlation and Regression - Scatter Plots
Step5:
Step6: t-test for comparing Scores between Students
Step7: Small effect (cannot be discerned by the naked eye) = 0.2
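The effect-size convention referenced above is Cohen's d, computed from the two group means and a pooled standard deviation:
$$d = \frac{\bar{x}_1 - \bar{x}_2}{s_{\text{pooled}}}, \qquad s_{\text{pooled}} = \sqrt{\frac{s_1^2 + s_2^2}{2}},$$
with $d \approx 0.2$ conventionally read as small, $0.5$ as medium, and $0.8$ as large.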
|
6,253
|
<ASSISTANT_TASK:>
Python Code:
# This project is designed to familiarize you with Python lists and linear algebra
# You may not use NumPy or any related scientific computing library for this assignment
# Matrices in this project must be represented as 2D lists, as follows:
A = [[1,2,3],
[2,3,3],
[1,2,5]]
B = [[1,2,3,5],
[2,3,3,5],
[1,2,5,1]]
# Vectors are also represented as 2D lists
C = [[1],
[2],
[3]]
#TODO: create a 4x4 identity matrix
I = [[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]]
# Run the following code to test your shape function
%run -i -e test.py LinearRegressionTestCase.test_shape
# TODO: return the number of rows and columns of the matrix
def shape(M):
return len(M),len(M[0])
# TODO: round every element to the given number of decimal places
# Modify the matrix in place; no return value
def matxRound(M, decPts=4):
row, col = shape(M)
for i in range(row):
for j in range(col):
M[i][j]=round(M[i][j],decPts)
pass
# Run the following code to test your matxRound function
%run -i -e test.py LinearRegressionTestCase.test_matxRound
# TODO: compute the transpose of the matrix
def transpose(M):
row, col = shape(M)
MT = []
for i in range(col):
MT.append([x[i] for x in M])
return MT
# Run the following code to test your transpose function
%run -i -e test.py LinearRegressionTestCase.test_transpose
# TODO: compute the matrix product AB; raise ValueError if the shapes don't allow multiplication
def matxMultiply(A, B):
rowA, colA = shape(A)
rowB, colB = shape(B)
if not colA == rowB:
raise ValueError
# result would be rowA x colB
result = [[0] * colB for row in range(rowA)]
BT = transpose(B)
for i in range(rowA):
rowa = A[i]
for j in range(colB):
colb = BT[j]
element = sum([rowa[x]*colb[x] for x in range(colA)])
result[i][j] = element
return result
# Run the following code to test your matxMultiply function
%run -i -e test.py LinearRegressionTestCase.test_matxMultiply
# TODO: construct the augmented matrix, assuming A and b have the same number of rows
def augmentMatrix(A, b):
# result would be rowA x (colA+colb)
rowA, colA = shape(A)
result = [[0] * (colA+1) for row in range(rowA)]
for i in range(rowA):
for j in range(colA):
result[i][j] = A[i][j]
result[i][colA] = b[i][0]
return result
# Run the following code to test your augmentMatrix function
%run -i -e test.py LinearRegressionTestCase.test_augmentMatrix
# TODO: r1 <---> r2
# Modify the matrix in place; no return value
def swapRows(M, r1, r2):
colM = shape(M)[1]
for i in range(colM):
tmp = M[r1][i]
M[r1][i] = M[r2][i]
M[r2][i] = tmp
pass
# Run the following code to test your swapRows function
%run -i -e test.py LinearRegressionTestCase.test_swapRows
# TODO: r1 <--- r1 * scale
# scale == 0 is invalid input; raise ValueError
# Modify the matrix in place; no return value
def scaleRow(M, r, scale):
if scale == 0:
raise ValueError
colM = shape(M)[1]
for i in range(colM):
M[r][i] *= scale
pass
# Run the following code to test your scaleRow function
%run -i -e test.py LinearRegressionTestCase.test_scaleRow
# TODO: r1 <--- r1 + r2*scale
# Modify the matrix in place; no return value
def addScaledRow(M, r1, r2, scale):
colM = shape(M)[1]
for i in range(colM):
M[r1][i] += M[r2][i]*scale
pass
# Run the following code to test your addScaledRow function
%run -i -e test.py LinearRegressionTestCase.test_addScaledRow
# Do not modify this!
from helper import *
A = generateMatrix(3,seed,singular=False)
b = np.ones(shape=(3,1),dtype=int) # it doesn't matter
Ab = augmentMatrix(A.tolist(),b.tolist()) # make sure your augmentMatrix is implemented
printInMatrixFormat(Ab,padding=3,truncating=0)
# Do not modify this!
A = generateMatrix(3,seed,singular=True)
b = np.ones(shape=(3,1),dtype=int)
Ab = augmentMatrix(A.tolist(),b.tolist()) # make sure your augmentMatrix is implemented
printInMatrixFormat(Ab,padding=3,truncating=0)
# TODO: implement Gauss-Jordan elimination to solve Ax = b
'''
Solve Ax = b with Gauss-Jordan elimination.
Parameters
    A: square matrix
    b: column vector
    decPts: number of decimal places to round to, default 4
    epsilon: threshold for treating a value as zero, default 1.0e-16
Returns the column vector x such that Ax = b,
or None if A and b differ in height,
or None if A is a singular matrix.
'''
from fractions import Fraction
def gj_Solve(A, b, decPts=4, epsilon = 1.0e-16):
def max_idx(list):
if max(list)<=epsilon:
raise ValueError
return 0 if len(list)<=0 else list.index(max(list))
if not shape(A)[0] == shape(b)[0]:
return None
Ab = augmentMatrix(A, b)
for i in range(shape(A)[1]):
col_i = [abs(Ab[row_num][i]) for row_num in range(i, shape(Ab)[0])]
try:
idx = max_idx(col_i) + i
swapRows(Ab, i, idx)
scaleRow(Ab, i, 1.0/Ab[i][i])
for j in range(shape(Ab)[0]):
if j != i:
addScaledRow(Ab, j, i, Fraction(-Ab[j][i]))
except ValueError:
return None
result = [[0] * 1 for row in range(shape(Ab)[0])]
for i in range(shape(Ab)[0]):
result[i][0]=Ab[i][-1]
return result
# Run the following code to test your gj_Solve function
%run -i -e test.py LinearRegressionTestCase.test_gj_Solve
# Do not modify this!
# Running it once is enough!
from helper import *
from matplotlib import pyplot as plt
%matplotlib inline
X,Y = generatePoints(seed,num=100)
## Visualization
plt.xlim((-5,5))
plt.xlabel('x',fontsize=18)
plt.ylabel('y',fontsize=18)
plt.scatter(X,Y,c='b')
plt.show()
#TODO: choose the best-fitting line y = mx + b
m1 = 3.2
b1 = 7.2
# Do not modify this!
plt.xlim((-5,5))
x_vals = plt.axes().get_xlim()
y_vals = [m1*x+b1 for x in x_vals]
plt.plot(x_vals, y_vals, '-', color='r')
plt.xlabel('x',fontsize=18)
plt.ylabel('y',fontsize=18)
plt.scatter(X,Y,c='b')
plt.show()
# TODO: implement the following function and print the MSE of the chosen line
def calculateMSE(X,Y,m,b):
list_ = ([(val[1]-val[0]*m-b)**2 for val in zip(X,Y)])
return sum(list_)/len(list_)
print(calculateMSE(X,Y,m1,b1))
# TODO: implement linear regression
'''
Parameters: X, Y -- two 1D arrays of corresponding x- and y-coordinates
Returns: m, b as floats
'''
def linearRegression(X,Y):
MX = [[val,1] for val in X]
MXT = transpose(MX)
result_left = matxMultiply(MXT,MX)
MY = [[val] for val in Y]
result_right = matxMultiply(MXT,MY)
[[m],[b]]=gj_Solve(result_left,result_right)
return (m,b)
m2,b2 = linearRegression(X,Y)
assert isinstance(m2,float),"m is not a float"
assert isinstance(b2,float),"b is not a float"
print(m2,b2)
# Please do not modify the code below
x1,x2 = -5,5
y1,y2 = x1*m2+b2, x2*m2+b2
plt.xlim((-5,5))
plt.xlabel('x',fontsize=18)
plt.ylabel('y',fontsize=18)
plt.scatter(X,Y,c='b')
plt.plot((x1,x2),(y1,y2),'r')
plt.title('y = {m:.4f}x + {b:.4f}'.format(m=m2,b=b2))
plt.show()
print(calculateMSE(X,Y,m2,b2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.2 Return the number of rows and columns of the matrix
Step2: 1.3 Round every element to a given number of decimal places
Step3: 1.4 Compute the transpose of the matrix
Step4: 1.5 Compute the matrix product AB
Step5: 2 Gauss-Jordan elimination
Step6: 2.2 Elementary row operations
Step7: 2.3 Solving Ax = b with Gauss-Jordan elimination
Step8: Following step 3 of the algorithm, work through the transformations of the invertible matrix step by step.
Step10: Following step 3 of the algorithm, work through the transformations of the singular matrix step by step.
Step11: (Optional) 2.4 The algorithm correctly identified the singular matrix:
Step12: 3.2 Fitting a straight line
Step13: 3.2.2 Computing the mean squared error (MSE)
Step14: 3.2.3 Tune the parameters $m, b$ to obtain the smallest mean squared error
Step15: What regression result did you obtain?
Step16: What is the MSE of your regression result on the current dataset?
|
6,254
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import display
from dividedDifferences import get_coeff, get_polynomial
from sympy import init_printing
from sympy import symbols, simplify
from sympy import Eq, S, Function
init_printing()
# The values in the points we use for the extrapolation
f0, f1, f2, f3 = symbols('f_0, f_1, f_2, f_3', real=True)
values = [f0, f1, f2, f3]
# Grid spacing
h = symbols('h', real=True)
# The value at the ghost
fg = symbols('f_g', real=True)
# Variables just for illustrative purposes
x = symbols('x')
# Coefficients in Newton polynomial
a0, a1, a2, a3 = symbols('a_0, a_1, a_2, a_3')
coeffs = [a0, a1, a2, a3]
# The points to extrapolate from (when they are unspecified)
x0, x1, x2, x3 = symbols('x_0, x_1, x_2, x_3')
positions = [x0, x1, x2, x3]
display(positions)
display(values)
display(Eq(symbols('p_N')(x), get_polynomial(coeffs, positions, symbols('x'))))
solvedCoeffs = get_coeff(values, positions)
for nr, coeff in enumerate(solvedCoeffs):
display(Eq(symbols('a_'+str(nr)), coeff))
display(Eq(Function('p_N')(x), get_polynomial(solvedCoeffs, positions, x)))
# Specification of the inner points
# Using x_0 as the reference point
x_0 = x0
x_1 = x0 + (S(1)/2)*h
x_2 = x0 + (1+S(1)/2)*h
x_3 = x0 + (2+S(1)/2)*h
specifiedPositions = [x_0, x_1, x_2, x_3]
display(specifiedPositions)
# Specification of the position of the ghost point
x_g = x_0 - h*S(1)/2
# Evaluate the polynomial
p = get_polynomial(get_coeff(values, specifiedPositions), specifiedPositions, x_g)
display(Eq(fg, simplify(p)))
# Specification of the inner points
# Using x_0 as the reference point
x_0 = x0
x_1 = x0 + h
x_2 = x0 + 2*h
x_3 = x0 + 3*h
specifiedPositions = [x_0, x_1, x_2, x_3]
display(specifiedPositions)
# Specification of the position of the ghost point
x_g = x_0 - h
# Evaluate the polynomial
p = get_polynomial(get_coeff(values, specifiedPositions), specifiedPositions, x_g)
display(Eq(fg, simplify(p)))
# Specification of the inner points
# Using x_0 as the reference point
x_0 = x0
x_1 = x0 + h
x_2 = x0 + 2*h
x_3 = x0 + (2+S(1)/2)*h
specifiedPositions = [x_0, x_1, x_2, x_3]
display(specifiedPositions)
# Specification of the position of the ghost point
x_g = x_3 + h*S(1)/2
# Evaluate the polynomial
p = get_polynomial(get_coeff(values, specifiedPositions), specifiedPositions, x_g)
display(Eq(fg, simplify(p)))
# Specification of the points
# Using x_0 as the reference point
x_0 = x0
x_1 = x0 + h
x_2 = x0 + 2*h
x_3 = x0 + 3*h
specifiedPositions = [x_0, x_1, x_2, x_3]
display(specifiedPositions)
# Specification of the position of the ghost point
x_g = x_3 + h
# Evaluate the polynomial
p = get_polynomial(get_coeff(values, specifiedPositions), specifiedPositions, x_g)
display(Eq(fg, simplify(p)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Intro
Step2: sorted by increasing value of the coordinate $x$. These points take the following values
Step3: Our goal is to use these four points to build a Newton polynomial we can use to extrapolate to the ghost point $x_g$.
Step4: The coefficients
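For reference, the Newton coefficients are the divided differences $a_k = f[x_0,\dots,x_k]$, defined by the recurrence
$$f[x_i] = f_i, \qquad f[x_i,\dots,x_{i+k}] = \frac{f[x_{i+1},\dots,x_{i+k}] - f[x_i,\dots,x_{i+k-1}]}{x_{i+k} - x_i}.$$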
Step5: Inserted into the polynomial, this yields
Step6: When the ghost point is the first in the $x$ array
Step7: Evaluating the polynomial at $x_g=x_0-\frac{h}{2}$, where $x_0$ is the boundary, yields
Step8: When we are not using the boundary value in the polynomial
Step9: Evaluating the polynomial at $x_g=x_0-h$ yields
Step10: When the ghost point is the last in the $x$ array
Step11: Evaluating the polynomial at $x_g=x_3+\frac{h}{2}$, where $x_3$ is the boundary, yields
Step12: When we are not using the boundary value in the polynomial
Step13: Evaluating the polynomial at $x_g=x_3+h$ yields
|
6,255
|
<ASSISTANT_TASK:>
Python Code:
url = "https://lists.wikimedia.org/pipermail/analytics/"
arx = Archive(url,archive_dir="../archives")
#threads = arx.get_threads()
len(arx.get_threads())
y = [t.get_num_messages() for t in arx.get_threads()]
plt.hist(y, bins=30)
plt.xlabel('number of messages in a thread')
plt.show()
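# Not in the original notebook: a power-law-shaped distribution is easier to
# judge on log-log axes with logarithmically spaced (here: power-of-two) bins.
bins = [2 ** k for k in range(0, 12)]
plt.hist(y, bins=bins)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('number of messages in a thread')
plt.show()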
n = [t.get_num_people() for t in arx.get_threads()]
plt.hist(n, bins = 20)
plt.xlabel('number of email-address in a thread')
plt.show()
y = [t.get_duration().days for t in arx.get_threads()]
plt.hist(y, bins = (10))
plt.xlabel('duration of a thread(days)')
plt.show()
y = [t.get_duration().seconds for t in arx.get_threads()]
plt.hist(y, bins = (10))
plt.xlabel('duration of a thread(seconds)')
plt.show()
print(arx.get_threads()[0].get_duration())
content = arx.get_threads()[0].get_root().data['Body']
content
len(content.split())
short_threads = []
long_threads = []
for t in arx.get_threads():
if(t.get_num_messages() < 6): short_threads.append(t)
else: long_threads.append(t)
print(len(short_threads))
print(len(long_threads))
long_threads[0].get_content()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can count the number of threads in the archive easily. The first time you run Archive.get_threads it may take some time to compute, but the result is cached in the Archive object.
Step2: We can plot a histogram of the number of messages in each thread. In most cases this will be a power law distribution.
Step3: We can also plot the number of people participating in each thread. Here, the participants are differentiated by the From header of their messages.
Step4: The duration of a thread is the amount of elapsed time between its first and last message.
Step5: You can examine the properties of a single thread.
Step6: Suppose we want to know whether or not longer threads (that contain more distinct messages) have fewer words in them per message.
Step7: You can get the content of a thread like this
|
6,256
|
<ASSISTANT_TASK:>
Python Code:
import pints
import pints.toy as toy
import numpy as np
import matplotlib.pyplot as plt
# Load a forward model
model = toy.LogisticModel()
# Create some toy data
real_parameters = [0.015, 500] # growth rate, carrying capacity
times = np.linspace(0, 1000, 100)
org_values = model.simulate(real_parameters, times)
# Add noise
noise = 50
values = org_values + np.random.normal(0, noise, org_values.shape)
real_parameters = np.array(real_parameters + [noise])
# Get properties of the noise sample
noise_sample_mean = np.mean(values - org_values)
noise_sample_std = np.std(values - org_values)
# Create an object with links to the model and time series
problem = pints.SingleOutputProblem(model, times, values)
# Create a log-likelihood function (adds an extra parameter!)
log_likelihood = pints.GaussianLogLikelihood(problem)
# Create a uniform prior over both the parameters and the new noise variable
log_prior = pints.UniformLogPrior(
[0.01, 400, noise*0.1],
[0.02, 600, noise*100]
)
# Create a posterior log-likelihood (log(likelihood * prior))
log_posterior = pints.LogPosterior(log_likelihood, log_prior)
# Perform sampling using MCMC, with a single chain
x0 = real_parameters * 1.1
mcmc = pints.MCMCController(log_posterior, 1, [x0])
mcmc.set_max_iterations(6000)
mcmc.set_log_to_screen(False)
print('Running...')
chains = mcmc.run()
print('Done!')
# Select chain 0 and discard warm-up
chain = chains[0]
chain = chain[3000:]
import pints.plot
# Plot the 1d histogram of each parameter
pints.plot.histogram([chain], parameter_names=['growth rate', 'carrying capacity', 'noise'])
plt.show()
pints.plot.pairwise(chain, heatmap=True, parameter_names=['growth rate', 'carrying capacity', 'noise'], ref_parameters=real_parameters)
plt.show()
pints.plot.pairwise(chain, kde=True, parameter_names=['growth rate', 'carrying capacity', 'noise'], ref_parameters=real_parameters)
plt.show()
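# Not in the original notebook: pints.plot also provides a trace plot, which is
# a quick visual check of mixing/convergence for the sampled chain(s).
pints.plot.trace(chains)
plt.show()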
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting 1d histograms
Step2: Plotting 2d histograms and a matrix of parameter distribution plots
Step3: Matrix of parameter distribution plots with KDE
|
6,257
|
<ASSISTANT_TASK:>
Python Code:
# Simple Function
def greet():
'''Simple Greet Function'''
print('Hello World')
greet()
# Function with arguments
def greet(name):
'''Simple Greet Function with arguments'''
print('Hello ', name)
greet('John')
# printing the doc string
print(greet.__doc__)
# Function with return statement
def add_numbers(num1,num2):
return num1 + num2
print(add_numbers(2,3.0))
# Since arguments are not strongly typed you can even pass a string
print(add_numbers('Hello','World'))
def myfunc():
x = 5
print('Value inside the function ',x)
x = 10
myfunc()
print('Value outside the function',x)
def myfunc():
#x = 5
print('Value inside the function ',x)
x = 10
myfunc()
print('Value outside the function',x)
def myfunc():
y = 5
print('Value inside the function ',y)
myfunc()
print('Value outside the function',y)
def myfunc():
global z
z = 5
print('Value inside the function ',z)
#z = 10
myfunc()
print('Value outside the function',z)
def greet(name,msg):
'''Simple greet function with name and message arguments'''
print("Hello " + name + ', ' + msg)
greet('John','Good Morning!')
# this will generate error since we missed one argument
greet('John')
def greet(name,msg='Good Evening!'):
'''Simple greet function with name and message arguments'''
print("Hello " + name + ', ' + msg)
greet('John')
def greet(name,msg='Good Evening!',salute):
'''Simple greet function with name and message arguments'''
print("Hello " + salute + '.' + name + ', ' + msg)
greet('John','Good Evening','Mr')
def greet(name,msg='Good Evening!',salute='Mr'):
'''Simple greet function with name and message arguments'''
print("Hello " + salute + '.' + name + ', ' + msg)
greet('John','Good Evening','Mrs')
def greet(name,msg='Good Evening!',salute='Mr'):
'''Simple greet function with name and message arguments'''
print("Hello " + salute + '.' + name + ', ' + msg)
# keyword arguments
greet(name="Jack",msg="How are you?")
# keyword arguments - out of order
greet(msg='How do you do?',name="Brian")
# mix of keyword and positional arguments
greet("Jill",salute='Ms',msg="Good to see you.")
greet(name="Keith","Good Afternoon")
def greet(*names):
'''This function greets all with a Hello'''
for name in names:
print('Hello ',name)
greet('John','Keith','Brian','Jose')
def factorial(num):
    if(num < 0):
        return 0 # factorial is undefined for negative numbers
    elif(num <= 1):
        return 1 # base case: 0! = 1! = 1
    else:
        return(num * factorial(num-1))
num = 4
print("Factorial of number ",num," is ",factorial(num))
def multiply(x,y):
return x * y
multiply(2,4)
def multiply(*args):
x = 1
for num in args:
x *= num
return(x)
print(multiply(2,4))
print(multiply(2,4,5))
print(multiply(3,5,8,9,10))
def print_values(**kwargs):
for key, value in kwargs.items():
print("The value of {} is {}".format(key, value))
print_values(
name_1="Alex",
name_2="Gray",
name_3="Harper",
name_4="Phoenix",
name_5="Remy",
name_6="Val"
)
def some_args(arg_1, arg_2, arg_3):
print("arg_1:", arg_1)
print("arg_2:", arg_2)
print("arg_3:", arg_3)
args = ("Sammy", "Casey", "Alex")
some_args(*args)
def some_args(arg_1, arg_2, arg_3):
print("arg_1:", arg_1)
print("arg_2:", arg_2)
print("arg_3:", arg_3)
my_list = [2, 3]
some_args(1, *my_list)
def some_kwargs(kwarg_1, kwarg_2, kwarg_3):
print("kwarg_1:", kwarg_1)
print("kwarg_2:", kwarg_2)
print("kwarg_3:", kwarg_3)
kwargs = {"kwarg_1": "Val", "kwarg_2": "Harper", "kwarg_3": "Remy"}
some_kwargs(**kwargs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 2
Step2: Example 3
Step3: Scope and Lifetime of Variables
Step4: Variables defined outside the function are visible from inside which means they have a global scope.
Step5: Global Variable
Step6: You have to use the global keyword for variables if you want to use those variables declared outside the function inside the function
Step7: Function Arguments
Step8: Default Arguments
Step9: One rule for default arguments is that once an argument has a default value then all the arguments to the right of it must also have default values. The following example will produce an error.
Step10: Keyword Arguments
Step11: However, please note that having a positional argument after a keyword argument will result in errors. For example, the following example will generate an error.
Step12: Arbitrary Arguments
Step13: Recursive Functions
Step14: Using *args and **kwargs in functions
Step15: Later if we decide to extend the multiply function to accept n number of arguments, we need to use the *args feature
Step16: Using **kwargs
Step17: Ordering Arguments
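The conventional ordering in a signature is: positional parameters first, then *args, then keyword parameters, then **kwargs. A small illustrative sketch:

def example(arg_1, *args, kwarg_1="default", **kwargs):
    print(arg_1, args, kwarg_1, kwargs)

example(1, 2, 3, kwarg_1="x", extra="y")  # prints: 1 (2, 3) x {'extra': 'y'}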
|
6,258
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import torch
MyNet = torch.nn.Sequential(torch.nn.Linear(4, 15),
torch.nn.Sigmoid(),
torch.nn.Linear(15, 3),
)
MyNet.load_state_dict(torch.load("my_model.pt"))
input = load_data()
assert type(input) == torch.Tensor
'''
training part
'''
# X, Y = load_iris(return_X_y=True)
# lossFunc = torch.nn.CrossEntropyLoss()
# opt = torch.optim.Adam(MyNet.parameters(), lr=0.001)
# for batch in range(0, 50):
# for i in range(len(X)):
# x = MyNet(torch.from_numpy(X[i]).float()).reshape(1, 3)
# y = torch.tensor(Y[i]).long().unsqueeze(0)
# loss = lossFunc(x, y)
# loss.backward()
# opt.step()
# opt.zero_grad()
# # print(x.grad)
# # print(loss)
# # print(loss)
output = MyNet(input)
probs = torch.nn.functional.softmax(output.reshape(1, 3), dim=1)
confidence_score, classes = torch.max(probs, 1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
6,259
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from numpy import *
from bokeh import *
from bokeh.plotting import *
output_notebook()
from matmodlab2 import *
from pandas import read_excel
from scipy.optimize import leastsq
diff = lambda x: np.ediff1d(x, to_begin=0.)
trace = lambda x, s='SIG': x[s+'11'] + x[s+'22'] + x[s+'33']
RTJ2 = lambda x: sqrt(((x['SIG11']-x['SIG22'])**2 +
                       (x['SIG22']-x['SIG33'])**2 +
                       (x['SIG33']-x['SIG11'])**2)/6.)
df = read_excel('porodata.xlsx', sheetname='hydrostatic')
df['EV'] = trace(df, 'STRAIN')
df['I1'] = trace(df, 'SIG')
df['dEV'] = diff(df['EV'])
df['dI1'] = diff(df['I1'])
plot = figure(x_axis_label='Volume Strain', y_axis_label='Pressure')
plot.circle(-df['EV'], -df['I1']/3.)
plot.text(-df['EV'], -df['I1']/3.,
text=range(len(df)),text_color="#333333",
text_align="left", text_font_size="5pt")
show(plot)
kfun = lambda B0, B1, B2, I1: B0 + B1 * exp(-B2 / abs(I1))
def kmm_bulk(x, fac, I1, K):
B0, B1, B2 = x * fac
return K - kfun(B0, B1, B2, I1)
imax = 101
df1 = df.iloc[imax:].copy()
K = np.array(df1['dI1'] / 3. / df1['dEV'])
b0 = np.array((K[-1], K[0] - K[-1], 1e9))
fac = 1e9
B, icov = leastsq(kmm_bulk, b0/fac, args=(fac, df1['I1'], K))
B0, B1, B2 = B * fac
B0, B1, B2
plot = figure(x_axis_label='Bulk Modulus', y_axis_label='Pressure')
plot.circle(-df1['I1']/3., K)
plot.line(-df['I1']/3., kfun(B0, B1, B2, df['I1']), color='red')
show(plot)
df['EP'] = df['I1'] / 3. / kfun(B0, B1, B2, df['I1']) - df['EV']
p3 = max(df['EP'])
df['PORO'] = p3 - df['EP']
plot = figure(x_axis_label='Plastic Strain', y_axis_label='Pressure')
plot.circle(df['EP'], -df['I1']/3.)
show(plot)
plot = figure(x_axis_label='Pressure', y_axis_label='PORO')
df2 = df.iloc[:imax].copy()
plot.circle(-df2['I1']/3., df2['PORO'])
show(plot)
def pfun(P0, P1, P2, P3, I1):
xi = -I1 / 3. + P0
return P3 * exp(-(P1 + P2 * xi) * xi)
def kmm_poro(x, fac, I1, P):
p0, p1, p2, p3 = asarray(x) * fac
return P - pfun(p0, p1, p2, p3, I1)
p0 = (1, 1, 1, p3)
fac = np.array([1e8, 1e-10, 1e-18, 1])
p, icov = leastsq(kmm_poro, p0, args=(fac, df2['I1'], df2['PORO']))
P0, P1, P2, P3 = p * fac
P0, P1, P2, P3
plot = figure(x_axis_label='Pressure', y_axis_label='PORO')
plot.circle(-df2['I1']/3., df2['PORO'], legend='Data')
plot.line(-df2['I1']/3., pfun(P0, P1, P2, P3, df2['I1']), color='red', legend='Fit')
show(plot)
keys = (2.5, 5.0, 7.5, 10.0, 12.5, 15.0, 22.5, 30.0)
colors = ('red', 'blue', 'orange', 'purple',
'green', 'black', 'magenta', 'teal', 'cyan')
df2 = {}
p = figure(x_axis_label='I1', y_axis_label='Sqrt[J2]')
p1 = figure(x_axis_label='Axial Strain', y_axis_label='Axial Stress')
for (i, key) in enumerate(keys):
key = 'txc p={0:.01f}MPa'.format(key)
x = read_excel('porodata.xlsx', sheetname=key)
x['I1'] = trace(x, 'SIG')
x['RTJ2'] = RTJ2(x)
df2[key] = x
p.circle(-df2[key]['I1'], df2[key]['RTJ2'], legend=key[4:], color=colors[i])
# determine where hydrostatic preload ends
j = nonzero(x['SIG11'] - x['SIG22'])[0]
E0, S0 = df2[key]['STRAIN11'][j[0]], df2[key]['SIG11'][j[0]]
p1.circle(-df2[key]['STRAIN11'][j]+E0, -df2[key]['SIG11'][j]+S0,
legend=key[4:], color=colors[i])
p.legend.orientation = 'horizontal'
show(p1)
show(p)
key = 'txc p=2.5MPa'
j = nonzero(df2[key]['SIG11'] - df2[key]['SIG22'])[0]
df3 = df2[key].iloc[j].copy()
E0, S0 = df3['STRAIN11'].iloc[0], df3['SIG11'].iloc[0]
EF, SF = df3['STRAIN11'].iloc[-1], df3['SIG11'].iloc[-1]
E = (SF - S0) / (EF - E0)
print('{0:E}'.format(E))
G = lambda I1: 3 * kfun(B0, B1, B2, I1) * E / (9 * kfun(B0, B1, B2, I1) - E)
gfun = lambda g0, g1, g2, rtj2: g0 * (1 - g1 * exp(-g2 * rtj2)) / (1 - g1)
def kmm_shear(x, fac, rtj2, G):
g0, g1, g2 = asarray(x) * fac
return G - gfun(g0, g1, g2, rtj2)
g = asarray(G(df3['I1']))
g0 = (g[0], .0001, 0)
fac = 1.
g, icov = leastsq(kmm_shear, g0, args=(fac, RTJ2(df3), g))
G0, G1, G2 = g * fac
G0, G1, G2
p2 = figure(x_axis_label='Sqrt[J2]', y_axis_label='Shear Modulus')
p2.circle(RTJ2(df3), G(df3['I1']))
p2.line(RTJ2(df3), gfun(G0, G1, G2, RTJ2(df3)), color='red')
show(p2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Summary
Step2: Hydrostatic Response
Step3: It appears that the unloading occurs at data point 101 and continues until the end of the data. This curve will be used to fit the bulk modulus parameters. Below, scipy is used to optimize the parameters to the curve.
Step4: Poro response
Step5: Shear Response
Step6: The axial stress versus axial strain plot shows that the response is linear, meaning that the elastic modulus is constant.
Step7: The shear modulus can now be determined
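The conversion in the code uses the standard isotropic-elasticity identity $E = \frac{9KG}{3K + G}$, rearranged to
$$G = \frac{3KE}{9K - E}.$$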
|
6,260
|
<ASSISTANT_TASK:>
Python Code:
from wikidataintegrator import wdi_core, wdi_login, wdi_helpers
from wikidataintegrator.ref_handlers import update_retrieved_if_new_multiple_refs
import pandas as pd
from pandas import read_csv
import requests
from tqdm.notebook import trange, tqdm
import ipywidgets
import widgetsnbextension
import time
datasrc = 'data/FinalReferenceStandard200Labels.csv'
exppath = 'results/'
print("Logging in...")
import wdi_user_config ## Credentials stored in a wdi_user_config file
login_dict = wdi_user_config.get_credentials()
login = wdi_login.WDLogin(login_dict['WDUSER'], login_dict['WDPASS'])
spl_adr_raw = read_csv(datasrc, delimiter="|", header=0, dtype={'Index':int,'PT ID':str,'LLT ID':str}).fillna('None')
print(spl_adr_raw.head(n=2))
## Retrieve the QIDs for each UMLS CUI ID (The property for UMLS CUI IDs is P2892)
sparqlQuery = "SELECT * WHERE {?topic wdt:P2892 ?CUI}"
result = wdi_core.WDItemEngine.execute_sparql_query(sparqlQuery)
## Format the data for analysis
wdmap = []
i=0
while i < len(result["results"]["bindings"]):
umls_qid = result["results"]["bindings"][i]["topic"]["value"].replace("http://www.wikidata.org/entity/", "")
cui_id = result["results"]["bindings"][i]["CUI"]["value"]
tmpdict = {'UMLS CUI':cui_id,'disease_WDID':umls_qid}
wdmap.append(tmpdict)
i=i+1
wdid_umls_all = pd.DataFrame(wdmap)
## Drop any entries that are not of interest
umls_cui_list = spl_adr_raw['UMLS CUI'].unique().tolist()
wdid_umls_df = wdid_umls_all.loc[wdid_umls_all['UMLS CUI'].isin(umls_cui_list)]
wdid_umls_df.to_csv(exppath+'cui_wdid_xref.tsv',sep='\t',header=True)
wdid_umls_df = read_csv(exppath+'cui_wdid_xref.tsv',delimiter='\t',header=0,index_col=0)
## Exclude entities with one to many OR many to one mappings
wdid_umls_df_unique = wdid_umls_df.drop_duplicates(subset='disease_WDID').copy()
wdid_umls_df_unique.drop_duplicates(subset='UMLS CUI',inplace=True)
print("initial mapping table size: ",len(wdid_umls_df), " de-duplicated: ",len(wdid_umls_df_unique))
## Merge the mapping table to the original table
spl_with_disease_wdids = spl_adr_raw.merge(wdid_umls_df_unique, on='UMLS CUI', how='left')
print(len(spl_adr_raw),len(spl_with_disease_wdids))
## Unit test
query_start = 'SELECT ?item ?itemLabel WHERE {?item wdt:P31 wd:Q28885102; rdfs:label ?itemLabel. FILTER(CONTAINS(LCASE(?itemLabel), "'
query_subject = 'NUCYNTA'
query_end = '"@en)).}'
sparqlQuery = query_start+query_subject.lower()+query_end
result = wdi_core.WDItemEngine.execute_sparql_query(sparqlQuery)
drug_qid = result["results"]["bindings"][0]["item"]["value"].replace("http://www.wikidata.org/entity/", "")
drug_label = result["results"]["bindings"][0]["itemLabel"]["value"]
print(drug_qid, drug_label)
print(len(result["results"]["bindings"]))
#drug_list = ['NUCYNTA','Natazia','EDURANT'] ## Loop test
drug_list = spl_with_disease_wdids['Drug Name'].unique().tolist()
pharm_start = 'SELECT ?item ?itemLabel WHERE {?item wdt:P31 wd:Q28885102; rdfs:label ?itemLabel. FILTER(CONTAINS(LCASE(?itemLabel), "'
med_start = 'SELECT ?item ?itemLabel WHERE {?item wdt:P31 wd:Q12140; rdfs:label ?itemLabel. FILTER(CONTAINS(LCASE(?itemLabel), "'
chem_start = 'SELECT ?item ?itemLabel WHERE {?item wdt:P31 wd:Q11173; rdfs:label ?itemLabel. FILTER(CONTAINS(LCASE(?itemLabel), "'
query_end = '"@en)).}'
drug_wdid_list = []
drug_match_failed = []
for i in tqdm(range(len(drug_list))):
query_subject = drug_list[i].lower()
try:
sparqlQuery = pharm_start+query_subject+query_end
result = wdi_core.WDItemEngine.execute_sparql_query(sparqlQuery)
drug_qid = result["results"]["bindings"][0]["item"]["value"].replace("http://www.wikidata.org/entity/", "")
drug_label = result["results"]["bindings"][0]["itemLabel"]["value"]
drug_wdid_list.append({'Drug Name':drug_list[i],'drug_WDID':drug_qid,'drug_wd_label':drug_label,'instance_of':'pharmaceutical product'})
except:
try:
sparqlQuery = med_start+query_subject+query_end
result = wdi_core.WDItemEngine.execute_sparql_query(sparqlQuery)
drug_qid = result["results"]["bindings"][0]["item"]["value"].replace("http://www.wikidata.org/entity/", "")
drug_label = result["results"]["bindings"][0]["itemLabel"]["value"]
drug_wdid_list.append({'Drug Name':drug_list[i],'drug_WDID':drug_qid,'drug_wd_label':drug_label,'instance_of':'medication'})
except:
try:
sparqlQuery = chem_start+query_subject+query_end
result = wdi_core.WDItemEngine.execute_sparql_query(sparqlQuery)
drug_qid = result["results"]["bindings"][0]["item"]["value"].replace("http://www.wikidata.org/entity/", "")
drug_label = result["results"]["bindings"][0]["itemLabel"]["value"]
drug_wdid_list.append({'Drug Name':drug_list[i],'drug_WDID':drug_qid,'drug_wd_label':drug_label,'instance_of':'chemical'})
except:
drug_match_failed.append(drug_list[i])
drug_wdid_df = pd.DataFrame(drug_wdid_list)
drug_wdid_df.to_csv(exppath+'drug_wdid_df.tsv',sep='\t',header=True)
print(i)
print(drug_match_failed)
## In the future, consider only running these
with open(exppath+'drug_match_failed.txt','w') as store_it:
for eachfailure in drug_match_failed:
store_it.write(eachfailure+'\n')
drug_match_failed = []
with open(exppath+'drug_match_failed.txt','r') as stored_it:
for eachline in stored_it:
drug_match_failed.append(eachline.strip())
drug_wdid_df = read_csv(exppath+'drug_wdid_df.tsv',delimiter='\t',header=0, index_col=0)
print(drug_wdid_df.head(n=2))
print(drug_match_failed)
print(len(drug_wdid_df)+len(drug_match_failed))
df_to_write = spl_with_disease_wdids.merge(drug_wdid_df, on='Drug Name',how = 'left')
print(len(df_to_write))
all_data_available = df_to_write.loc[(~df_to_write['disease_WDID'].isnull()) &
(~df_to_write['drug_WDID'].isnull())]
not_attempted = df_to_write.loc[(df_to_write['disease_WDID'].isnull()) |
(df_to_write['drug_WDID'].isnull())]
print(len(all_data_available))
#print(not_attempted.head(n=2))
print(all_data_available.head(n=1))
## Save the Failures
not_attempted.to_csv(exppath+'qid_missing_not_attempted.tsv',sep='\t',header=True)
from datetime import datetime
import copy
def create_reference(spl_url,source_type):
timeStringNow = datetime.now().strftime("+%Y-%m-%dT00:00:00Z")
archived_date = datetime.strptime('9/29/2015','%m/%d/%Y').strftime("+%Y-%m-%dT00:00:00Z")
refStatedIn = wdi_core.WDItemID(value="Q73670648", prop_nr="P248", is_reference=True)
refRetrieved = wdi_core.WDTime(timeStringNow, prop_nr="P813", is_reference=True)
refRetrieved2 = wdi_core.WDTime(archived_date, prop_nr="P2960", is_reference=True)
refURL = wdi_core.WDUrl(value=spl_url, prop_nr="P854", is_reference=True)
reftype = wdi_core.WDString(value=source_type, prop_nr="P958", is_reference=True)
return [refStatedIn, refRetrieved, refRetrieved2, refURL, reftype]
## Unit test -- write a statement
fda_base_spl_url = 'https://dailymed.nlm.nih.gov/dailymed/drugInfo.cfm?setid='
i=0
drug_qid = all_data_available.iloc[i]['drug_WDID']
#disease_qid = all_data_available.iloc[i]['disease_WDID']
disease_qid = 'Q4115189' #sandbox run
spl_drug_id = all_data_available.iloc[i]['Drug ID']
spl_url = fda_base_spl_url+spl_drug_id
source_type = all_data_available.iloc[i]['Section Display Name']
reference = create_reference(spl_url,source_type)
statement = [wdi_core.WDItemID(value=drug_qid, prop_nr="P5642",
references=[copy.deepcopy(reference)])]
wikidata_item = wdi_core.WDItemEngine(wd_item_id=disease_qid, data=statement, append_value="P5642",
global_ref_mode='CUSTOM', ref_handler=update_retrieved_if_new_multiple_refs)
#wikidata_item.get_wd_json_representation()
wikidata_item.write(login)
print(i,disease_qid,drug_qid)
wd_revision_list = []
run_list = all_data_available[0:3] ## test run
#run_list = all_data_available
i = 0 # reset the counter left over from the unit test above
while i < len(run_list):
    drug_qid = run_list.iloc[i]['drug_WDID']
    disease_qid = run_list.iloc[i]['disease_WDID']
    spl_drug_id = run_list.iloc[i]['Drug ID']
    spl_url = fda_base_spl_url+spl_drug_id
    source_type = run_list.iloc[i]['Section Display Name']
reference = create_reference(spl_url,source_type)
statement = [wdi_core.WDItemID(value=drug_qid, prop_nr="P5642", references=[copy.deepcopy(reference)])]
wikidata_item = wdi_core.WDItemEngine(wd_item_id=disease_qid, data=statement, append_value="P5642",
global_ref_mode='CUSTOM', ref_handler=update_retrieved_if_new_multiple_refs)
wikidata_item.write(login, edit_summary='added ADR relationship from FDA SPLs')
wd_revision_list.append({'drug':drug_qid,'disease':disease_qid,'wd_revid':wikidata_item.lastrevid})
i=i+1
wd_edit_results = pd.DataFrame(wd_revision_list)
print(wd_edit_results)
wd_edit_results.to_csv(exppath+'run_results.tsv',sep='\t',header=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Retrieve and map WDIDs
Step3: Query Wikidata for instances of drugs whose names match product label names
Step4: Merge tables to convert drug names to WDID products
Step5: Convert triples to WD statements
|
6,261
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = np.dot(self.weights_hidden_to_output, error)
# TODO: Backpropagated error terms - Replace these values with your calculations.
output_error_term = error
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:, None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:, None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
iterations = 5000
learning_rate = 1
hidden_nodes = 16
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
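# As one extra check (hypothetical addition, not part of the original project),
# the test-set error can be reported in the original units as well:
test_mse = MSE(predictions[0], (test_targets['cnt']*std + mean).values)
print("Test MSE (original units): {:.2f}".format(test_mse))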
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
|
6,262
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
path = "data/dogscats/sample/"
#path = "data/dogscats"
from __future__ import division,print_function
import os, json
from glob import glob
import numpy as np
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
from importlib import reload
import utils; reload(utils)
from utils import plots
# reload is handy if you change something in the file
# As large as you can, but no larger than 64 is recommended.
# If you have an older or cheaper GPU, you'll run out of memory, so will have to decrease this.
batch_size=64
# Import our class, and instantiate
import vgg16; reload(vgg16)
from vgg16 import Vgg16
vgg = Vgg16()
# Grab a few images at a time for training and validation.
# NB: They must be in subdirectories named based on their category
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size*2)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
vgg = Vgg16()
batches = vgg.get_batches(path+'train', batch_size=4)
imgs,labels = next(batches)
plots(imgs, titles=labels)
vgg.predict(imgs, True)
vgg.classes[:4]
batch_size=64
batches = vgg.get_batches(path+'train', batch_size=batch_size)
val_batches = vgg.get_batches(path+'valid', batch_size=batch_size)
vgg.finetune(batches)
vgg.fit(batches, val_batches, nb_epoch=1)
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing import image
FILES_PATH = 'http://files.fast.ai/models/'; CLASS_FILE='imagenet_class_index.json'
# Keras' get_file() is a handy function that downloads files, and caches them for re-use later
fpath = get_file(CLASS_FILE, FILES_PATH+CLASS_FILE, cache_subdir='models')
with open(fpath) as f: class_dict = json.load(f)
# Convert dictionary with string indexes into an array
classes = [class_dict[str(i)][1] for i in range(len(class_dict))]
classes[:5]
def ConvBlock(layers, model, filters):
for i in range(layers):
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(filters, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
def FCBlock(model):
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
# Mean of each channel as provided by VGG researchers
vgg_mean = np.array([123.68, 116.779, 103.939]).reshape((3,1,1))
def vgg_preprocess(x):
x = x - vgg_mean # subtract mean
    return x[:, ::-1] # reverse channel axis rgb->bgr
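# Quick check of vgg_preprocess on a dummy batch (illustrative only): the mean
# subtraction broadcasts over (batch, channel, height, width) and the channel
# axis is flipped from RGB to BGR, so the shape is unchanged.
dummy = np.ones((1, 3, 224, 224), dtype=np.float32)
print(vgg_preprocess(dummy).shape) # (1, 3, 224, 224)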
def VGG_16():
model = Sequential()
model.add(Lambda(vgg_preprocess, input_shape=(3,224,224)))
ConvBlock(2, model, 64)
ConvBlock(2, model, 128)
ConvBlock(3, model, 256)
ConvBlock(3, model, 512)
ConvBlock(3, model, 512)
model.add(Flatten())
FCBlock(model)
FCBlock(model)
model.add(Dense(1000, activation='softmax'))
return model
model = VGG_16()
fpath = get_file('vgg16.h5', FILES_PATH+'vgg16.h5', cache_subdir='models')
model.load_weights(fpath)
batch_size = 4
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True,
batch_size=batch_size, class_mode='categorical'):
return gen.flow_from_directory(path+dirname, target_size=(224,224),
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
batches = get_batches('train', batch_size=batch_size)
val_batches = get_batches('valid', batch_size=batch_size)
imgs,labels = next(batches)
# This shows the 'ground truth'
plots(imgs, titles=labels)
def pred_batch(imgs):
preds = model.predict(imgs)
idxs = np.argmax(preds, axis=1)
print('Shape: {}'.format(preds.shape))
print('First 5 classes: {}'.format(classes[:5]))
print('First 5 probabilities: {}\n'.format(preds[0, :5]))
print('Predictions prob/class: ')
for i in range(len(idxs)):
idx = idxs[i]
print (' {:.4f}/{}'.format(preds[i, idx], classes[idx]))
pred_batch(imgs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define path to data
Step2: A few basic libraries that we'll need for the initial exercises
Step3: We have created a file most imaginatively called 'utils.py' to store any little convenience functions we'll want to use. We will discuss these as we use them.
Step4: Use a pretrained VGG model with our Vgg16 class
Step5: The code above will work for any image recognition task, with any number of categories! All you have to do is to put your images into one folder per category, and run the code above.
Step6: Vgg16 is built on top of Keras (which we will be learning much more about shortly!), a flexible, easy to use deep learning library that sits on top of Theano or Tensorflow. Keras reads groups of images and labels in batches, using a fixed directory structure, where images from each category for training must be placed in a separate folder.
Step7: (BTW, when Keras refers to 'classes', it doesn't mean python classes - but rather it refers to the categories of the labels, such as 'pug', or 'tabby'.)
Step8: As you can see, the labels for each image are an array, containing a 1 in the first position if it's a cat, and in the second position if it's a dog. This approach to encoding categorical variables, using an array that contains a single 1 in the position corresponding to the category, is very common in deep learning. It is called one hot encoding.
Step9: We can now pass the images to Vgg16's predict() function to get back probabilities, category indexes, and category names for each image's VGG prediction.
Step10: The category indexes are based on the ordering of categories used in the VGG model - e.g here are the first four
Step11: (Note that, other than creating the Vgg16 object, none of these steps are necessary to build a model; they are just showing how to use the class to view imagenet predictions.)
Step12: Calling finetune() modifies the model such that it will be trained based on the data in the batches provided - in this case, to predict either 'dog' or 'cat'.
Step13: Finally, we fit() the parameters of the model using the training data, reporting the accuracy on the validation set after every epoch. (An epoch is one full pass through the training data.)
Step14: That shows all of the steps involved in using the Vgg16 class to create an image recognition model using whatever labels you are interested in. For instance, this process could classify paintings by style, or leaves by type of disease, or satellite photos by type of crop, and so forth.
Step15: Let's import the mappings from VGG ids to imagenet category ids and descriptions, for display purposes later.
Step16: Here's a few examples of the categories we just imported
Step17: Model creation
Step18: ...and here's the fully-connected definition.
Step19: When the VGG model was trained in 2014, the creators subtracted the average of each of the three (R,G,B) channels first, so that the data for each channel had a mean of zero. Furthermore, their software expected the channels to be in B,G,R order, whereas Python by default uses R,G,B. We need to preprocess our data to make these two changes, so that it is compatible with the VGG model.
Step20: Now we're ready to define the VGG model architecture - look at how simple it is, now that we have the basic blocks defined!
Step21: We'll learn about what these different blocks do later in the course. For now, it's enough to know that
Step22: As well as the architecture, we need the weights that the VGG creators trained. The weights are the part of the model that is learnt from the data, whereas the architecture is pre-defined based on the nature of the problem.
Step23: Getting imagenet predictions
Step24: Keras provides functionality to create batches of data from directories containing images; all we have to do is to define the size to resize the images to, what type of labels to create, whether to randomly shuffle the images, and how many images to include in each batch. We use this little wrapper to define some helpful defaults appropriate for imagenet data
Step25: From here we can use exactly the same steps as before to look at predictions from the model.
Step26: The VGG model returns 1,000 probabilities for each image, representing the probability that the model assigns to each possible imagenet category for each image. By finding the index with the largest probability (with np.argmax()) we can find the predicted label.
|
6,263
|
<ASSISTANT_TASK:>
Python Code:
from games import *
from notebook import psource, pseudocode
%psource Game
%psource TicTacToe
moves = dict(A=dict(a1='B', a2='C', a3='D'),
B=dict(b1='B1', b2='B2', b3='B3'),
C=dict(c1='C1', c2='C2', c3='C3'),
D=dict(d1='D1', d2='D2', d3='D3'))
utils = dict(B1=3, B2=12, B3=8, C1=2, C2=4, C3=6, D1=14, D2=5, D3=2)
initial = 'A'
print(moves['A']['a1'])
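# Terminal states map directly to payoffs for MAX via the utils dict,
# e.g. (illustrative):
print(utils['B1']) # 3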
fig52 = Fig52Game()
psource(Fig52Game.actions)
print(fig52.actions('B'))
psource(Fig52Game.result)
print(fig52.result('A', 'a1'))
psource(Fig52Game.utility)
print(fig52.utility('B1', 'MAX'))
print(fig52.utility('B1', 'MIN'))
psource(Fig52Game.terminal_test)
print(fig52.terminal_test('C3'))
psource(Fig52Game.to_move)
print(fig52.to_move('A'))
psource(Fig52Game)
pseudocode("Minimax-Decision")
psource(minimax_decision)
print(minimax_decision('B', fig52))
print(minimax_decision('C', fig52))
print(minimax_decision('D', fig52))
print(minimax_decision('A', fig52))
from notebook import Canvas_minimax
from random import randint
minimax_viz = Canvas_minimax('minimax_viz', [randint(1, 50) for i in range(27)])
pseudocode("Alpha-Beta-Search")
%psource alphabeta_search
print(alphabeta_search('A', fig52))
print(alphabeta_search('B', fig52))
print(alphabeta_search('C', fig52))
print(alphabeta_search('D', fig52))
from notebook import Canvas_alphabeta
from random import randint
alphabeta_viz = Canvas_alphabeta('alphabeta_viz', [randint(1, 50) for i in range(27)])
game52 = Fig52Game()
print(random_player(game52, 'A'))
print(random_player(game52, 'A'))
print( alphabeta_player(game52, 'A') )
print( alphabeta_player(game52, 'B') )
print( alphabeta_player(game52, 'C') )
minimax_decision('A', game52)
alphabeta_search('A', game52)
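# Illustrative cross-check: minimax and alpha-beta should pick the same move
# from every state of this small game; alpha-beta just visits fewer nodes.
for s in ['A', 'B', 'C', 'D']:
    print(s, minimax_decision(s, game52), alphabeta_search(s, game52))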
game52.play_game(alphabeta_player, alphabeta_player)
game52.play_game(alphabeta_player, random_player)
game52.play_game(query_player, alphabeta_player)
game52.play_game(alphabeta_player, query_player)
ttt = TicTacToe()
ttt.display(ttt.initial)
my_state = GameState(
to_move = 'X',
utility = '0',
board = {(1,1): 'X', (1,2): 'O', (1,3): 'X',
(2,1): 'O', (2,3): 'O',
(3,1): 'X',
},
moves = [(2,2), (3,2), (3,3)]
)
ttt.display(my_state)
random_player(ttt, my_state)
random_player(ttt, my_state)
alphabeta_player(ttt, my_state)
ttt.play_game(random_player, alphabeta_player)
for _ in range(10):
print(ttt.play_game(alphabeta_player, alphabeta_player))
for _ in range(10):
print(ttt.play_game(random_player, alphabeta_player))
from notebook import Canvas_TicTacToe
bot_play = Canvas_TicTacToe('bot_play', 'random', 'alphabeta')
rand_play = Canvas_TicTacToe('rand_play', 'human', 'random')
ab_play = Canvas_TicTacToe('ab_play', 'human', 'alphabeta')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GAME REPRESENTATION
Step2: Now let's get into the details of all the methods in our Game class. You have to implement these methods when you create new classes to represent your own game.
Step3: The class TicTacToe inherits from the class Game. As mentioned earlier, you really want to do this. Catching bugs and errors becomes a whole lot easier.
Step4: In moves, we have a nested dictionary. The outer dictionary maps each state to its possible moves (as a dictionary). The inner dictionary maps each move name to the state reached once the move is made.
Step5: We will now take a look at the functions we need to implement. First we need to create an object of the Fig52Game class.
Step6: actions
Step7: result
Step8: utility
Step9: terminal_test
Step10: to_move
Step11: As a whole the class Fig52 that inherits from the class Game and overrides its functions
Step12: MIN-MAX
Step13: Implementation
Step14: Example
Step15: Now MAX knows that the values for B, C and D are 3, 2 and 2 (produced by the above moves of MIN). The greatest is 3, which he will get with move a1. This is then the move MAX will make. Let's see the algorithm in full action
Step16: Visualization
Step17: ALPHA-BETA
Step18: Implementation
Step19: Example
Step20: The optimal move for MAX is a1, for the reasons given above. MIN will pick move b1 for B resulting in a value of 3, updating the a value of MAX to 3. Then, when we find under C a node of value 2, we will stop searching under that sub-tree since it is less than a. From D we have a value of 2. So, the best move for MAX is the one resulting in a value of 3, which is a1.
Step21: Visualization
Step22: PLAYERS
Step23: First we try out our random_player(game, state). Given a game state it will give us a random move every time
Step24: The alphabeta_player(game, state) will always give us the best move possible, for the relevant player (MAX or MIN)
Step25: The alphabeta_player simply calls the method alphabeta_full_search; the two are essentially the same. The module implements both alphabeta_full_search and minimax_decision. They do the same job and return the same thing, namely the best move in the current state; alphabeta_full_search is just faster, because it prunes the search tree and hence explores fewer states.
Step26: Demonstrating the play_game function on the game52
Step27: Note that if you are the first player then alphabeta_player plays as MIN, and if you are the second player then alphabeta_player plays as MAX. This happens because that's the way the game is defined in the class Fig52Game. Having a look at the code of this class should make it clear.
Step28: We can print a state using the display method
Step29: Hmm, so that's the initial state of the game; no X's and no O's.
Step30: So, how does this game state look like?
Step31: The random_player will behave how he is supposed to i.e. pseudo-randomly
Step32: But the alphabeta_player will always give the best move, as expected
Step33: Now let's make two players play against each other. We use the play_game function for this. The play_game function makes players play the match against each other and returns the utility for the first player, of the terminal state reached when the game ends. Hence, for our TicTacToe game, if we get the output +1, the first player wins, -1 if the second player wins, and 0 if the match ends in a draw.
Step34: The output is (usually) -1, because random_player loses to alphabeta_player. Sometimes, however, random_player manages to draw with alphabeta_player.
Step35: A random_player should never win against an alphabeta_player. Let's test that.
Step36: Canvas_TicTacToe(Canvas)
Step37: Now, let's play a game ourselves against a random_player
Step38: Yay! We (usually) win. But we cannot win against an alphabeta_player, however hard we try.
|
6,264
|
<ASSISTANT_TASK:>
Python Code:
#Normal inputs
import pandas as pd
import numpy as np
import seaborn as sns
import pylab as plt
%matplotlib inline
from IPython.display import Image, display
#Make the notebook wider
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
#Create a toy dataframe
df = pd.DataFrame([[1,2,3],[1,2,3]],columns=["a","b","c"])
df
#Saving the toy dataframe above
df.to_csv("data/toy_df.csv",sep="\t",index=False)
Image(url="http://www.abs.gov.au/websitedbs/a3121120.nsf/4a256353001af3ed4b2562bb00121564/5869990518bbb7feca257b55002261e0/Body/3.109A!OpenElement&FieldElemFormat=gif")
%%html
<h3>There are three variables in this data set. What are they? </h3>
<table>
<tr> <th></th> <th>2010</th> <th>2015</th> </tr>
<tr> <td>Male</td> <td>0</td> <td>5</td> </tr>
<tr> <td>Female</td> <td>1</td> <td>4</td> </tr>
</table>
%%html
<h3>Tidy data</h3>
<table>
<tr> <th>year</th> <th>sex</th> <th>count</th> </tr>
<tr> <td>2010</td> <td>female</td> <td>1</td> </tr>
<tr> <td>2010</td> <td>male</td> <td>0</td> </tr>
<tr> <td>2015</td> <td>female</td> <td>4</td> </tr>
<tr> <td>2015</td> <td>male</td> <td>5</td> </tr>
</table>
#Example of melting (columns to rows)
print("Melting")
Image("figures/melt.png",width=400)
#Example of pivoting (rows to columns)
print("Pivoting")
Image("figures/pivot.png",width=800)
#Data on population density by year
df = pd.read_csv("data/columns_headers.csv")
df.head()
### Fix
#Variables present in columns already
variables_already_presents = ['METRO_ID', 'Metropolitan areas']
#Variables to combine in one
columns_combine = ['2000', '2001', '2002','2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011','2012', '2013', '2014']
#Fix
fixed = pd.melt(df,id_vars=variables_already_presents,
value_vars=columns_combine,
var_name="Year",
value_name="POP_DENS")
fixed.head(10)
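# Sanity check (illustrative): melting 15 year columns turns every original
# row into 15 long-format rows, so the row counts should match up.
assert len(fixed) == len(df) * len(columns_combine)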
#Actual data from OECD
df = pd.read_csv("data/multiple_variables.csv")
df.head(2)
### Fix
#Columns already present
variables_already_present = ["METRO_ID","Metropolitan areas","Year"]
#Column with the variables
column_to_split = ["VAR"]
#Column with the values
column_with_values = "Value"
#Fix
df_fixed = df.pivot_table(column_with_values,
variables_already_present,
column_to_split)
df_fixed.head()
#use .reset_index() to convert index to columns
df_fixed.reset_index()
df = pd.read_csv("data/multiple_variables_and_headers.csv")
df.head()
print(list(df.columns)[4:])
cols = []
for column in df.columns:
try:
int(column)
cols.append(column)
except:
pass
print(cols)
### Fix setp 1
variables_already_presents = ['METRO_ID', 'Metropolitan areas','VAR']
columns_combine = cols
df = pd.melt(df,
id_vars=variables_already_presents,
value_vars=columns_combine,
var_name="Year",
value_name="Value")
df.head()
### Fix step 2
column_with_values = "Value"
column_to_split = ["VAR"]
variables_already_present = ["METRO_ID","Metropolitan areas","Year"]
df.pivot_table(column_with_values,
variables_already_present,
column_to_split).reset_index().head()
#Add more observations
Image(url="http://pandas.pydata.org/pandas-docs/stable/_images/merging_concat_basic.png")
madrid = pd.read_csv("data/mad_twovariables.csv").head(2)
amsterdam = pd.read_csv("data/ams_twovariables.csv").head(2)
display(madrid)
display(amsterdam)
combined = pd.concat([madrid,amsterdam])
combined
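# pd.concat keeps each frame's original row labels; passing ignore_index=True
# renumbers them, which is often what you want (shown for illustration):
pd.concat([madrid, amsterdam], ignore_index=True)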
a = pd.DataFrame([[1,2],[3,4]],columns=["a","b"])
b = pd.DataFrame([[1,2],[3,4]],columns=["c","b"])
display(a)
display(b)
pd.concat([a,b])
Image(url="http://pandas.pydata.org/pandas-docs/stable/_images/merging_merge_on_key.png",width=700)
ams_green = pd.read_csv("data/ams_green.csv")
ams_pop = pd.read_csv("data/ams_popdens.csv")
display(ams_green.head(2))
display(ams_pop.head(2))
combined = pd.merge(ams_green,ams_pop,
left_on=["METRO_ID","Metropolitan areas","Year"],
right_on=["METRO_ID","Metropolitan areas","Year"],
how="inner")
combined.head()
print("Inner join pd.merge(left_df,right_df,how=\"inner\")")
display(Image(url="http://pandas.pydata.org/pandas-docs/stable/_images/merging_merge_on_key_inner.png"))
print("Right join pd.merge(left_df,right_df,how=\"right\")", "The same with left")
display(Image(url="http://pandas.pydata.org/pandas-docs/stable/_images/merging_merge_on_key_right.png"))
print("Outer join pd.merge(left_df,right_df,how=\"outer\")")
display(Image(url="http://pandas.pydata.org/pandas-docs/stable/_images/merging_merge_on_key_outer.png"))
def mean_(some_numbers):
return np.mean(some_numbers)
a = mean_([1,2,3])
print(a)
np.mean([1,2,3])
def read_our_csv():
#reading the raw data from oecd
df = pd.read_csv("data/CITIES_19122016195113034.csv",sep="\t")
    #fixing the columns (the first one reads as '"METRO_ID"' instead of "METRO_ID")
cols = list(df.columns)
cols[0] = "METRO_ID"
df.columns = cols
#pivot the table
column_with_values = "Value"
column_to_split = ["VAR"]
variables_already_present = ["METRO_ID","Metropolitan areas","Year"]
df_fixed = df.pivot_table(column_with_values,
variables_already_present,
column_to_split).reset_index()
return df_fixed
df_fixed = read_our_csv()
df_fixed.head()
#Read and fix the data
df_fixed = read_our_csv()
df_fixed = df_fixed.loc[df_fixed["Year"]==2000,["GREEN_AREA_PC","POP_DENS"]]
df_fixed.head()
plt.figure(figsize=(6,4))
plt.scatter(df_fixed["GREEN_AREA_PC"],df_fixed["POP_DENS"],edgecolor="none",color=(70/255,140/255,210/255))
plt.yscale("log")
plt.xscale("log")
plt.ylim(10,10000)
plt.xlim(10,10000)
plt.xlabel("Green area (m^2/million people)")
plt.ylabel("Density population (people/km^s)")
#fraction of green area
df_fixed["fraction_green"] = df_fixed["GREEN_AREA_PC"]*df_fixed["POP_DENS"]/1E6
#make the figure
plt.figure(figsize=(6,4))
plt.scatter(df_fixed["fraction_green"],df_fixed["POP_DENS"],edgecolor="none",color=(70/255,140/255,210/255))
plt.yscale("log")
plt.xscale("log")
plt.ylim(10,10000)
plt.xlim(0.0001,1)
plt.xlabel("Percentage green (m^2 green / km^2 land)")
plt.ylabel("Density population (population/km^2)")
#Imagine we want to convert the income to a category
df = pd.DataFrame({"kids": [2,3,3,1,3,1,4,4,2], "income": [">100",">100","<40","<40","40-100","<40","40-100",">100","40-100"]})
df
d_cat2income = {">100": 100,
"40-100": 70,
"<40": 40}
d_cat2income
d_cat2income.get(">90")
df["income_number"] = df["income"].apply(lambda x:
x[0])
df.head()
#First we need a dictionary
d_cat2income = {">100": 100, "40-100": 70, "<40": 40}
def category2income(value):
return d_cat2income.get(value)
df = pd.DataFrame({"kids": [2,3,3,1,3,1,4,4,2], "income": [">100",">100","40-100","<40","40-100","<40","40-100",">100","40-100"]})
df["income"] = df["income"].apply(category2income)
df.head()
#Or without creating a new function
df = pd.DataFrame({"kids": [2,3,3,1,3,1,4,4,2], "income": [">100",">100","40-100","<40","40-100","<40","40-100",">100","40-100"]})
df["income"] = df["income"].apply(lambda x: d_cat2income.get(x))
df.head()
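# Equivalent and arguably more idiomatic: Series.map looks values up in a dict
# directly (illustrative, on a fresh copy of the toy column):
df2 = pd.DataFrame({"income": [">100", "40-100", "<40"]})
df2["income"] = df2["income"].map(d_cat2income)
df2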
df_fixed = read_our_csv().loc[:,["METRO_ID","Year"]]
df_fixed.head()
def keep_2_letters(value):
return value[:2]
df_fixed["Country"] = df_fixed["METRO_ID"].apply(keep_2_letters)
df_fixed.head()
#or in one line
df_fixed["Country"] = df_fixed["METRO_ID"].apply(lambda x: x[:2])
df_fixed.head()
#for example doing the log of a column. This is useful when the distribution is lognormal (such as income, gdp).
#Somewhat confusingly: the distribution of income across many people IS LOGNORMAL,
#but the distribution of the mean income across many cities IS NOT LOGNORMAL.
df_fixed = read_our_csv().loc[:,["METRO_ID","Year","GDP_PC"]]
df_fixed["GDP_PC_log"] = np.log10(df_fixed["GDP_PC"])
df_fixed.head()
#Read and fix the data
df_fixed = read_our_csv()
#Remove rows with missing values
cols = ["LABOUR_PRODUCTIVITY","UNEMP_R","GDP_PC"]
df_fixed = df_fixed.dropna(subset=cols)
#Creating a column for country
df_fixed["C"] = df_fixed["METRO_ID"].apply(lambda x: x[:2])
#Keeping italy
df_fixed = df_fixed.loc[df_fixed["C"]=="IT",["C","METRO_ID","Metropolitan areas"] +cols]
#We are going to normalize values dividing by the mean (so new values have a mean of 1)
df_fixed.loc[:,cols] = df_fixed[cols]/np.nanmean(df_fixed[cols],0)
#Make a copy of the data
df_original = df_fixed.copy()
#Take a random sample of 20 values of productivity
sample = set(df_fixed.loc[:,"LABOUR_PRODUCTIVITY"].sample(20))
#Deleting those values (saying that they are np.NaN (missing))
df_fixed.loc[df_fixed["LABOUR_PRODUCTIVITY"].isin(sample),"LABOUR_PRODUCTIVITY"] = np.NaN
df_fixed.head()
#How to fix by ignoring the rows
ignoring = df_fixed.dropna(subset=["LABOUR_PRODUCTIVITY"])
#How to fix by imputing with mean/median/mode
mean_inputed = df_fixed.fillna(df_fixed.mean())
#Based on this. Similar points for unemployment have similar points for productivity
sns.lmplot(x="LABOUR_PRODUCTIVITY",y="UNEMP_R",data=df_fixed,fit_reg=False)
Image("figures/labels.png")
print("Using a random sample => MCAR DATA")
Image("figures/kmeans_vs_mean.png")
print("Using a biasad sample => MAR DATA")
Image("figures/kmeans_vs_mean_worst_case.png")
#Read and fix the data. also drop missing values
df = read_our_csv().loc[:,["METRO_ID","Year","Metropolitan areas","LABOUR_PRODUCTIVITY","UNEMP_R"]]
#Drop rows with missing values
df = df.dropna()
#Keep years 2000 and 2012
df = df.loc[df["Year"].isin([2000,2012])]
#Add a column for country
df["C"] = df["METRO_ID"].apply(lambda x: x[:2] if isinstance(x,str) else "")
df.head()
sns.lmplot?
keep_countries = ['IT', 'NL', 'DE', 'ES', 'GR']
sns.lmplot(x="LABOUR_PRODUCTIVITY",y="UNEMP_R",hue="C",col="Year"
,data=df.loc[df["C"].isin(keep_countries)],fit_reg=False,
size=3,aspect=1.4,sharex=True,sharey=True)
plt.show()
#Roll two dices 100 times
dice_rolls = np.random.randint(1,7,1000) + np.random.randint(1,7,1000)
dice_rolls
#Count the number of each element to create the distribution
from collections import Counter
Counter(dice_rolls)
#And we can visualize it with a histogram
sns.distplot(dice_rolls, kde=False, rug=False)
#Read and fix the data. also drop missing values
df = read_our_csv().loc[:,["METRO_ID","Year","Metropolitan areas","LABOUR_PRODUCTIVITY","UNEMP_R"]]
#Drop rows with missing values
df = df.dropna()
#Keep years 2000 and 2012
df = df.loc[df["Year"].isin([2000,2012])]
#Add a column for country
df["C"] = df["METRO_ID"].apply(lambda x: x[:2] if isinstance(x,str) else "")
df.head()
#Keep x and y values
x = df["LABOUR_PRODUCTIVITY"]
#Make plot
plt.figure(figsize=(6,4))
sns.distplot(x, kde=True, rug=False,hist_kws={"edgecolor":"none"},kde_kws={"cut":0})
plt.ylabel("Frequency")
plt.xlabel("Labour productivity")
#fitting a distribution
from scipy.stats import norm,lognorm,expon
#Keep x and y values
x = df["LABOUR_PRODUCTIVITY"]
#Make plot
plt.figure(figsize=(6,4))
sns.distplot(x, kde=False, fit=norm, rug=False,
hist_kws={"edgecolor":"none","normed":True},fit_kws={"cut":0,"color":"gray"})
plt.ylabel("Frequency")
plt.xlabel("Labour productivity")
sns.jointplot?
#Make plot
sns.jointplot(x="LABOUR_PRODUCTIVITY", y="UNEMP_R", data=df,size=6)
plt.ylabel("Frequency")
plt.xlabel("Labour productivity")
plt.show()
sns.jointplot(x="LABOUR_PRODUCTIVITY", y="UNEMP_R", data=df,kind="hex",
marginal_kws=dict(bins=20, rug=False, kde=True, kde_kws={"cut":0}), gridsize = 15,size=6)
cols_get = ["METRO_ID","Metropolitan areas","Year","UNEMP_R",'GDP_PC',"LABOUR_PRODUCTIVITY"]
df_fixed = read_our_csv().loc[:,cols_get]
df_fixed["C"] = df_fixed["METRO_ID"].apply(lambda x: x[:2] if isinstance(x,str) else "")
south = df_fixed.loc[df_fixed["C"].isin(["IT","ES","GR","DE"])]
south.head()
plt.figure(figsize=(6,4))
sns.violinplot(x="C", y="LABOUR_PRODUCTIVITY", data=south,width=1,inner="quartiles")
from pandas.plotting import parallel_coordinates
cols_get = ["METRO_ID","Metropolitan areas","Year","UNEMP_R",'GDP_PC',"LABOUR_PRODUCTIVITY"]
df_fixed = read_our_csv().loc[:,cols_get]
df_fixed["C"] = df_fixed["METRO_ID"].apply(lambda x: x[:2] if isinstance(x,str) else "")
italy = df_fixed.loc[df_fixed["C"].isin(["IT"])]
italy = italy.loc[italy["Year"].isin([2001])]
italy.head()
#Here we normalize the variables so they are on a common scale (standard scores)
cols = ["UNEMP_R",'GDP_PC',"LABOUR_PRODUCTIVITY"]
for c in cols:
italy[c] = (italy[c]- np.mean(italy[c]))/np.std(italy[c])
parallel_coordinates(italy.loc[(italy["Metropolitan areas"] !="Italy")],"Year",cols=cols,colormap="Paired")
d_city2regio = {"Bari": "S",
"Bologna": "C",
"Catania": "S",
"Florence": "C",
"Genova": "N",
"Milan": "N",
"Naples": "S",
"Palermo": "S",
"Rome": "C",
"Turin": "N",
"Venice": "N"}
italy["Area"] = italy["Metropolitan areas"].apply(lambda x: d_city2regio.get(x))
parallel_coordinates(italy.loc[(italy["Metropolitan areas"] !="Italy")],"Area",cols=cols,colormap="Paired")
cols_get = ["METRO_ID","Metropolitan areas","Year","UNEMP_R",'GDP_PC',"LABOUR_PRODUCTIVITY"]
df_fixed = read_our_csv().loc[:,cols_get]
df_fixed["C"] = df_fixed["METRO_ID"].apply(lambda x: x[:2] if isinstance(x,str) else "")
south = df_fixed.loc[df_fixed["C"].isin(["IT","ES","GR","DE"])]
south.head()
sns.factorplot(x="Year", y="UNEMP_R", hue="C", size=4,aspect=1.8,data=south,ci=95)
sns.factorplot(x="Year", y="UNEMP_R", hue="C", size=4,aspect=3,
data=south.loc[south["Year"].isin([2000,2010])],kind="violin",width=0.9,inner="quartiles")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Save CSV to disk (df.to_csv())
Step2: 2. Tidy data
Step3: 2.2 What is tidy data?
Step4: 2.3 Tidying messy datasets
Step5: Data to study
Step6: 2.5 Multiple variables are stored in one column
Step7: 2.6 Variables are stored in both rows and columns
Step8: 3. Merging and concatenating datasets
Step9: Note that it's not a problem if the variables are not all the same
Step10: 3.2 Merge datasets
Step11: 4. Change columns, replace and apply functions
Step12: 4.1 Multiply columns
Step13: Is there a relationship between green area per capita and population density? Why?
Step14: 4.2 Categories to numbers
Step15: 4.2 Keeping part of the values in a column (text)
Step16: 4.3 Doing math with the column
Step17: 5. Dealing with missing data
Step18: 4.1 Ignoring data
Step19: 4.3 Imputing with mean/median/mode
Step20: 4.4 Imputing using neighbous
Step21: But first of all we need to make sure we have missing values instead of "-9" or other strings
Step22: 6.0 SCATTER using sns.lmplot
Step23: 6.1a HISTOGRAM
Step24: A more useful example using OECD data
Step25: 6.1b TWO HISTOGRAMS
Step26: 6.1c sns.violinplot
Step27: 6.2 parallel coordinates
Step28: We are going to normalize values using standard scores
Step29: 6.3 Factorplot
|
6,265
|
<ASSISTANT_TASK:>
Python Code:
df = pd.read_csv('../data/raw_running_data.csv')
print(type(df))
df.head(10)
?pd.read_csv
df.dtypes
df.columns
df.index
df['Date'].head()
df = pd.read_csv('../data/raw_running_data.csv', parse_dates=['Date'])
df.Date.head()
df.set_index('Date', inplace=True)
df.plot()
?df.plot
df.plot(kind='bar')
df.plot(kind='area')
plt.ylabel("Count")
df
df.describe()
df['2014-11']
len(df['2014-11'])
from datetime import datetime
start = datetime(2014,8,1)
end = datetime(2015,1,2)
start = df.index.min()
end = df.index.max()
print(start, end)
d = pd.DataFrame(index=pd.date_range(start, end))
pd.datetime(2014,8,1)
d.head()
from IPython.display import Image
Image(url='http://i.stack.imgur.com/GbJ7N.png')
df.join(d).head(10)
d.join(df).head(10)
df.join(d, how='outer')
df = df.join(d, how='outer')
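# For reference, reindexing the original frame onto the full date range gives
# the same result in one step (illustrative alternative to the outer join):
# df.reindex(pd.date_range(start, end))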
df.head()
df.plot()
df.Miles.fillna(0,inplace=True)
df.head()
df.plot()
df.to_csv('../data/date_fixed_running_data.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You'll also notice that there are a TON of extra parameters that can be passed into this function: we can skip rows, specify dtypes, flag whether there's compression or not, set the index column, and control how many chunks or rows to read. This function could be a whole video itself. The important part is, when you're reading files, you've got a lot of options. You can do a lot of this processing after the fact and if you have any specific questions please don't hesitate to ask.
Step2: Now for a little background on this dataset. This is my exercise data from august 1st 2014 to January 1st 2015. During these several months I recorded the distance every time that I ran and recorded the time the majority of the time that took as well.
Step3: Now we can see our 3 columns here, date, miles and time. We can see that we’ve got some NaN values when I didn’t record the time. We saw those in the head command above.
Step4: Now pandas is absolutely fantastic with time series data. It’s got a ton of cool functionality but before we can get to that we’ve got to set our index as that time data.
Step5: Now when we go to get our date Series, we can see that the dtype is object. That's not right; we want something that's date based, specifically datetime64.
Step6: Boom now we’ve got the correct dates. Now pandas is pretty good at parsing dates but occasionally it will mess them up. You may have to write a custom date parser but doing so is outside the scope of this course.
Step7: Let’s go ahead and plot it.
Step8: You’ll see that it prints what it can, it doesn’t understand what our time data is yet so it doesn’t plot it. We’ll address that a bit later.
Step9: Now you can see there are different ways of plotting this date. The default is line but we also have bar charts, or area charts, or density charts (with appropriate kinds of data).
Step10: Now by default pandas uses matplotlib to plot its graphs. Our first line in this notebook is a magic function that makes these matplotlib graphs be created in our notebook.
Step11: Now that we’ve printed our the data in our data frame both as a graph and as a table. Let’s try summarizing it a bit better. We can do that better with the describe command. This is going to give us some basic summary statistics about our data, the number of points we have, the mean and standard deviation as well as min and max and quartiles.
Step12: Now that we’ve described it, and when we look at the data itself.
Step13: This kind of thing comes up time and time again in data analysis. You’re analyzing some data trying to get patterns out of it, you notice some weird inconsistencies and then realize the data was collected in a strange way. However pandas makes it easy to fill in those gaps.
Step14: We can also avoid importing datetime by just using the built in datetime function which creates one in the exact same way.
Step15: Of course there are multiple ways to perform this operation but I’m going to put it in as a join to illustrate and import concepts. Joins. A join is like a SQL join or a venn diagram.
Step16: So we performed an outer join. There are other kinds of joins like inner joins, left and right that you can look up in the documentation.
Step17: Alright, now remember we’re just going to use the fillna method to fill it in. Since we’re going to assume I ran 0 miles on those days. We just fill it with 0 and do it inplace. Now our plot looks much better.
Step18: Let’s go ahead and save our work thus far. Doing this is extremely easy. We just specify the location and save a CSV. You can see there are numerous other “to” methods to save to different file formats. I'm not going to cover these in detail but they are there.
|
6,266
|
<ASSISTANT_TASK:>
Python Code:
# Imports
import sys
import pandas as pd
import csv
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (20.0, 10.0)
# %load util.py
#!/usr/bin/python
# Util file to import in all of the notebooks to allow for easy code re-use
# Calculate Percent of Attendees that did not speak
def percent_silent(df):
total = len(df)
silent = 0
for row in df.iteritems():
if row[1] == 0:
silent = silent + 1
percent = {}
percent['TOTAL'] = total
percent['SILENT'] = silent
percent['VERBOSE'] = total - silent
return percent
# Calculate Percent of Attendees that left
def percent_left(df):
total = len(df)
left = 0
for row in df.iteritems():
if row[1] == 0:
left = left + 1
percent = {}
percent['TOTAL'] = total
percent['LEFT'] = left
percent['STAYED'] = total - left
return percent
# Calculate Percent of Attendees along gender
def percent_gender(df):
total = len(df)
female = 0
for row in df.iteritems():
if row[1] == 1:
female = female + 1
percent = {}
percent['TOTAL'] = total
percent['FEMALE'] = female
percent['MALE'] = total - female
return percent
# Calculate the share of attendees who spoke, by gender
def percent_talking_gender(df):
total = 0
male = 0
female = 0
for talks, gender in df.itertuples(index=False):
if talks > 0:
total = total + 1
if gender == 0:
male = male + 1
elif gender == 1:
female = female + 1
percent = {}
percent['TOTAL'] = total
percent['FEMALE'] = female
percent['MALE'] = male
return percent
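# The same tallies can be written more compactly with pandas once the data is
# loaded and encoded below (illustrative one-liner; assumes TALKS > 0 means
# the attendee spoke):
# data.loc[data["TALKS"] > 0, "GENDER"].value_counts()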
# Read
data = pd.read_csv('data/2_ux.csv')
# Display
data
# Convert GENDER to Binary (sorry, i know...)
data.loc[data["GENDER"] == "M", "GENDER"] = 0
data.loc[data["GENDER"] == "F", "GENDER"] = 1
# Convert STAYED to 1 and Left/Late to 0
data.loc[data["STAYED"] == "Y", "STAYED"] = 1
data.loc[data["STAYED"] == "N", "STAYED"] = 0
data.loc[data["STAYED"] == "L", "STAYED"] = 0
# We should now see the data in numeric values
data
# Run Describe to give us some basic Min/Max/Mean/Std values
data.describe()
# Run Value_Counts in order to see some basic grouping by attribute
vc_talks = data['TALKS'].value_counts()
vc_talks
vc_gender = data['GENDER'].value_counts()
vc_gender
vc_stayed = data['STAYED'].value_counts()
vc_stayed
# Now let's do some basic plotting with MatPlotLib
data.plot()
data.plot(kind='bar')
fig1, ax1 = plt.subplots()
ax1.pie(data['TALKS'], autopct='%1.f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
data_hostless = data.drop(data.index[[0,1]])
data_hostless.head()
data_hostless.describe()
dh_vc_talks = data_hostless['TALKS'].value_counts()
dh_vc_talks
dh_vc_gender = data_hostless['GENDER'].value_counts()
dh_vc_gender
dh_vc_stayed = data_hostless['STAYED'].value_counts()
dh_vc_stayed
data_hostless.plot()
data_hostless.plot(kind='bar')
fig1, ax1 = plt.subplots()
ax1.pie(data_hostless['TALKS'], autopct='%1.f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
# Percentage of attendees that were silent during the talk
silent = percent_silent(data['TALKS'])
silent
fig1, ax1 = plt.subplots()
sizes = [silent['SILENT'], silent['VERBOSE']]
labels = 'Silent', 'Talked'
explode = (0.05, 0)
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.0f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
# Percentage of attendees that left early during the talk
left = percent_left(data['STAYED'])
left
fig1, ax1 = plt.subplots()
sizes = [left['LEFT'], left['STAYED']]
labels = 'Left', 'Stayed'
explode = (0.1, 0)
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.0f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
# Percentage of attendees that were Male vs. Female (see notes above around methodology)
gender = percent_gender(data['GENDER'])
gender
fig1, ax1 = plt.subplots()
sizes = [gender['FEMALE'], gender['MALE']]
labels = 'Female', 'Male'
explode = (0.1, 0)
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.0f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
# Calculate the gender split among attendees who spoke (TALKS > 0)
distribution = percent_talking_gender(data[['TALKS','GENDER']])
distribution
fig1, ax1 = plt.subplots()
sizes = [distribution['FEMALE'], distribution['MALE']]
labels = 'Female Speakers', 'Male Speakers'
explode = (0.1, 0)
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.0f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading the Data
Step2: Sanitizing the Data
Step3: Analysis and Visualization (V1)
Step4: Analysis and Visualization (V2)
Step5: this is still pretty bad...
|
6,267
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x8
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x8
conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x8
conv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))
# Now 28x28x8
conv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
#Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
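# Rough size check (illustrative): total trainable parameters in this graph.
total_params = int(np.sum([np.prod(v.get_shape().as_list())
                           for v in tf.trainable_variables()]))
print("Trainable parameters:", total_params)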
sess = tf.Session()
epochs = 10
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
imgs = batch[0].reshape((-1, 28, 28, 1))
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x32
conv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x32
conv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x16
conv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x16
conv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))
# Now 28x28x32
conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
#Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
sess = tf.Session()
epochs = 30
batch_size = 200
# Set's how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images from the batch
imgs = batch[0].reshape((-1, 28, 28, 1))
# Add random noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
# Clip the images to be between 0 and 1
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
# Noisy images as inputs, original images as targets
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Network Architecture
Step2: Training
Step3: Denoising
Step4: Checking out the performance
|
6,268
|
<ASSISTANT_TASK:>
Python Code:
from nltk.book import *
print(sent1)
print(sent3)
print(sent5)
print(text6)
print(text6.name)
print("This text has %d words" % len(text6.tokens))
print("The first hundred words are:", " ".join( text6.tokens[:100] ))
print(text5[0])
print(text3[0:11])
print(text4[0:51])
text6.concordance( "swallow" )
text6.concordance('Arthur', lines=37)
text6.concordance('Arthur', width=100)
word_to_count = "KNIGHT"
print("The word %s appears %d times." % ( word_to_count, text6.count( word_to_count ) ))
t6_vocab = text6.vocab()
t6_words = list(t6_vocab.keys())
print("The text has %d different words" % ( len( t6_words ) ))
print("Some arbitrary 50 of these are:", t6_words[:50])
print("The most frequent 50 words are:", t6_vocab.most_common(50))
print("The word swallow appears %d times" % ( t6_vocab['swallow'] ))
print("The text has %d words that appear only once" % ( len( t6_vocab.hapaxes() ) ))
print("Some arbitrary 100 of these are:", t6_vocab.hapaxes()[:100])
print("Here we assert something that is true.")
for w in t6_words:
assert text6.count( w ) == t6_vocab[w]
print("See, that worked! Now we will assert something that is false, and we will get an error.")
for w in t6_words:
assert w.lower() == w
# With a list comprehension
long_words = [ w for w in t6_words if len( w ) > 5 and t6_vocab[w] > 3 ]
# The long way, with a for loop. This is identical to the above.
long_words = []
for w in t6_words:
if( len ( w ) > 5 and t6_vocab[w] > 3 ):
long_words.append( w )
print("The reasonably frequent long words in the text are:", long_words)
print("\nUp to twenty collocations")
text6.collocations()
print("\nUp to fifty collocations")
text6.collocations(num=50)
print("\nCollocations that might have one word in between")
text6.collocations(window_size=3)
%pylab --no-import-all inline
word_length_dist = FreqDist( [ len(w) for w in t6_vocab.keys() ] )
word_length_dist.plot()
text6.dispersion_plot(["coconut", "swallow", "KNIGHT", "witch", "ARTHUR"])
text2.dispersion_plot(["Elinor", "Marianne", "Edward", "Willoughby", "Lucy"])
def print_text_stats( thetext ):
# Average word length
awl = sum([len(w) for w in thetext]) / len( thetext )
ld = len( thetext ) / len( thetext.vocab() )
print("%.2f\t%.2f\t%s" % ( awl, ld, thetext.name ))
all_texts = [ text1, text2, text3, text4, text5, text6, text7, text8, text9 ]
print("Wlen\tLdiv\tTitle")
for t in all_texts:
print_text_stats( t )
from nltk import word_tokenize
# You can read the file this way:
f = open('alice.txt', encoding='utf-8')
raw = f.read()
f.close()
# or you can read it this way.
with open('alice.txt', encoding='utf-8') as f:
raw = f.read()
# Use NLTK to break the text up into words, and put the result into a
# Text object.
alice = Text( word_tokenize( raw ) )
alice.name = "Alice's Adventures in Wonderland"
print(alice.name)
alice.concordance( "cat" )
print_text_stats( alice )
from nltk.corpus import gutenberg
print(gutenberg.fileids())
paradise_lost = Text( gutenberg.words( "milton-paradise.txt" ) )
paradise_lost
print("Length of text is:", len( gutenberg.raw( "milton-paradise.txt" )))
print("Number of words is:", len( gutenberg.words( "milton-paradise.txt" )))
assert( len( gutenberg.words( "milton-paradise.txt" )) == len( paradise_lost ))
print("Number of sentences is:", len( gutenberg.sents( "milton-paradise.txt" )))
print("Number of paragraphs is:", len( gutenberg.paras( "milton-paradise.txt" )))
from nltk.corpus import PlaintextCorpusReader
from nltk.corpus.reader.util import read_regexp_block
# Define how paragraphs look in our text files.
def read_hanging_block( stream ):
return read_regexp_block( stream, "^[A-Za-z]" )
corpus_root = 'federalist'
file_pattern = r'federalist_.*\.txt'
federalist = PlaintextCorpusReader( corpus_root, file_pattern, para_block_reader=read_hanging_block )
print("List of texts in corpus:", federalist.fileids())
print("\nHere is the fourth paragraph of the first text:")
print(federalist.paras("federalist_1.txt")[3])
fed1 = Text( federalist.words( "federalist_1.txt" ))
print("The first Federalist Paper has the following word collocations:")
fed1.collocations()
print("\n...and the following most frequent words.")
fed1.vocab().most_common(50)
from nltk.corpus import stopwords
print("We have stopword lists for the following languages:")
print(stopwords.fileids())
print("\nThese are the NLTK-provided stopwords for the German language:")
print(", ".join( stopwords.words('german') ))
print("The most frequent words are: ")
print([word[0] for word in t6_vocab.most_common(50)])
f1_most_frequent = [ w[0] for w in t6_vocab.most_common() if w[0].lower() not in stopwords.words('english') ]
print("\nThe most frequent interesting words are: ", " ".join( f1_most_frequent[:50] ))
import re
def is_interesting( w ):
if( w.lower() in stopwords.words('english') ):
return False
if( w.isupper() ):
return False
return w.isalpha()
f1_most_frequent = [ w[0] for w in t6_vocab.most_common() if is_interesting( w[0] ) ]
print("The most frequent interesting words are: ", " ".join( f1_most_frequent[:50] ))
my_text = alice[305:549]
print(" ". join( my_text ))
print(len( set( my_text )), "words")
from nltk import PorterStemmer, LancasterStemmer, WordNetLemmatizer
porter = PorterStemmer()
lanc = LancasterStemmer()
wnl = WordNetLemmatizer()
porterlist = [porter.stem(w) for w in my_text]
print(" ".join( porterlist ))
print(len( set( porterlist )), "Porter stems")
lanclist = [lanc.stem(w) for w in my_text]
print(" ".join( lanclist ))
print(len( set( lanclist )), "Lancaster stems")
wnllist = [ wnl.lemmatize(w) for w in my_text ]
print(" ".join( wnllist ))
print(len( set( wnllist )), "Wordnet lemmata")
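# The WordNet lemmatizer treats every word as a noun unless told otherwise;
# passing a part-of-speech tag usually improves the result (illustrative):
print(wnl.lemmatize("running"), wnl.lemmatize("running", pos="v")) # running run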
from nltk import pos_tag
print(pos_tag(my_text))
from nltk.corpus import brown
print(brown.tagged_words()[:25])
print(brown.tagged_words(tagset='universal')[:25])
tagged_word_fd = FreqDist([ w[1] for w in brown.tagged_words(tagset='universal') ])
tagged_word_fd.plot()
from nltk import ne_chunk
tagged_text = pos_tag(sent2)
ner_text = ne_chunk( tagged_text )
print(ner_text)
ner_text
def list_named_entities( tree ):
try:
tree.label()
except AttributeError:
return
if( tree.label() != "S" ):
print(tree)
else:
for child in tree:
list_named_entities( child )
list_named_entities( ner_text )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This import statement reads the book samples, which include nine sentences and nine book-length texts. It has also helpfully put each of these texts into a variable for us, from sent1 to sent9 and text1 to text9.
Step2: Let's look at the texts now.
Step3: Each of these texts is an nltk.text.Text object, and has methods to let you see what the text contains. But you can also treat it as a plain old list!
Step4: We can do simple concordancing, printing the context for each use of a word throughout the text
Step5: The default is to show no more than 25 results for any given word, but we can change that.
Step6: We can adjust the amount of context we show in our concordance
Step7: ...or get the number of times any individual word appears in the text.
Step8: We can generate a vocabulary for the text, and use the vocabulary to find the most frequent words as well as the ones that appear only once (a.k.a. the hapaxes.)
Step9: You've now seen two methods for getting the number of times a word appears in a text
Step10: We can try and find interesting words in the text, such as words of a minimum length (the longer a word, the less common it probably is) that occur more than once or twice...
Step11: And we can look for pairs of words that go together more often than chance would suggest.
Step12: NLTK can also provide us with a few simple graph visualizations, when we have matplotlib installed. To make this work in iPython, we need the following magic line. If you are running in PyCharm, then you do not need this line - it will throw an error if you try to use it!
Step13: The vocabulary we get from the .vocab() method is something called a "frequency distribution", which means it's a giant tally of each unique word and the number of times that word appears in the text. We can also make a frequency distribution of other features, such as "each possible word length and the number of times a word of that length is used". Let's do that and plot it.
Step14: We can plot where in the text a word occurs, and compare it to other words, with a dispersion plot. For example, the following dispersion plots show respectively (among other things) that the words 'coconut' and 'swallow' almost always appear in the same part of the Holy Grail text, and that Willoughby and Lucy do not appear in Sense and Sensibility until some time after the beginning of the book.
Step15: We can go a little crazy with text statistics. This block of code computes the average word length for each text, as well as a measure known as the "lexical diversity" that measures how much word re-use there is in a text.
Step16: A text of your own
Step17: Using text corpora
Step18: Paradise Lost is now a Text object, just like the ones we have worked on before. But we accessed it through the NLTK corpus reader, which means that we get some extra bits of functionality
Step19: We can also make our own corpus if we have our own collection of files, e.g. the Federalist Papers from last week. But we have to pay attention to how those files are arranged! In this case, if you look in the text file, the paragraphs are set apart with 'hanging indentation' - all the lines
Step20: And just like before, from this corpus we can make individual Text objects, on which we can use the methods we have seen above.
Step21: Filtering out stopwords
Step22: So reading in the stopword list, we can use it to filter out vocabulary we don't want to see. Let's look at our 50 most frequent words in Holy Grail again.
Step23: Maybe we should get rid of punctuation and all-caps words too...
Step24: Getting word stems
Step25: NLTK comes with a few different stemming algorithms; we can also use WordNet (a system for analyzing semantic relationships between words) to look for the lemma form of each word and "stem" it that way. Here are some results.
Step26: Part-of-speech tagging
Step27: NLTK part-of-speech tags (simplified tagset)
Step28: We can even do a frequency plot of the different parts of speech in the corpus (if we have matplotlib installed!)
Step29: Named-entity recognition
Step30: Here is a function that takes the result of ne_chunk (the plain-text form, not the graph form!) and spits out only the named entities that were found.
|
6,269
|
<ASSISTANT_TASK:>
Python Code:
import pylab as pl
from pylab import pi, sqrt, cos, sin, exp

def x_2z_over_dst(z):
w = 2*pi
    # beta = 1, wn = w (forcing at the natural frequency)
wd = w*sqrt(1-z*z)
# Clough Penzien p. 43
A = z/sqrt(1-z*z)
def f(t):
return (cos(wd*t)+A*sin(wd*t))*exp(-z*w*t)-cos(w*t)
return pl.vectorize(f)
t = pl.linspace(0,20,1001)
print(t)
zetas = (.02, .05, .10, .20)
print(zetas)
for z in zetas:
# call the function of zeta that returns
# a function of time, assign the name bar_x to this function
bar_x = x_2z_over_dst(z)
# do the plotting...
pl.plot(t,bar_x(t))
pl.ylim((-1.0, 1.0))
pl.title(r'$\zeta=%4.2f$'%(z,))
pl.show()
t = pl.linspace(0,5,501)
for z in zetas:
# call the function of zeta that returns
# a function of time, assign the name bar_x to this function
bar_x = x_2z_over_dst(z)
# do the plotting...
pl.plot(t,bar_x(t)/2/z, label=r'$\zeta=%4.2f$'%(z,))
pl.legend(ncol=5,loc='lower center', fancybox=1, shadow=1, framealpha=.95)
pl.grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Above we compute some constants that depend on $\zeta$,
Step2: We want to see what happens for different values of $\zeta$, so we create
Step3: Now, the real plotting
Step4: Wait a minute!
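For reference, here is a hedged reconstruction of the formula that x_2z_over_dst implements (Clough & Penzien, p. 43, resonant forcing with $\beta = 1$ so the forcing frequency equals $\omega$; the leading factor $2\zeta$ is why the code later divides by 2*z):
$$\frac{x(t)}{\Delta_{st}} = \frac{1}{2\zeta}\left[e^{-\zeta\omega t}\left(\cos\omega_D t + \frac{\zeta}{\sqrt{1-\zeta^2}}\sin\omega_D t\right) - \cos\omega t\right],\qquad \omega_D = \omega\sqrt{1-\zeta^2}.$$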
|
6,270
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import log as ln
from itertools import cycle # used to create a loopable colormap
def get_Atom_prop(Atom,Prop):
'''
This is a helper to get certain values from the tables
You can't get the symbol this way
Properties are: 'Z', 'Name', 'MAI Weight', 'MAI Mass', 'M',
'SBE', 'Q84', 'Q96', 'W', 's'
'''
return Atom_props.set_index(['Symbol']).loc[Atom][Prop]
#return Atom_props.get_value(Atom,Prop)
def reduced_energy(Projectile,Target,Energy,sputter_model):
'''
Uses equ. 2 from M84
'''
Z1=get_Atom_prop(Projectile,'Z')
Z2=get_Atom_prop(Target,'Z')
M1=get_Atom_prop(Projectile,'M')
M2=get_Atom_prop(Target,'M')
if sputter_model=="Sigmund":
a=0.469209/((Z1**(2.0/3.0)+Z2**(2.0/3.0))**(1.0/2.0))
return (0.069497*M2*a*Energy)/((M1+M2)*Z1*Z2)
else:
# This will be used for Y96 and M84
return 0.03255/(Z1*Z2*(Z1**(2./3)+Z2**(2./3))**0.5)*M2/(M1+M2)*Energy
def reduced_nuc_stopping_x_section(Projectile,Target,Energy,sputter_model,model):
'''
based on equ. A1
    calculates reduced_energy(Projectile,Target,Energy)
Depending on the last parameter 'model' we either use a Kr-C based stopping power (model=KrC) or in all other
cases a Thomas-Fermi based approach as used in M84
'''
# lets first calculate the reduced energy
eps=reduced_energy(Projectile,Target,Energy,sputter_model)
# we have two different models. If model is KrC we use the one introduced by Eckstein and mentioned in Y96
if model=="KrC":
return 0.5*ln(1+1.2288*eps)/(eps+0.1728*np.sqrt(eps)+0.0088*eps**0.1504)
# in all other cases we use the M84 Thomas-Fermi model
else:
# This is the Thomas Fermi as given in Y96 and M84 we also use this for Sigmund
return 3.441*np.sqrt(eps)*ln(eps+2.718)/(1+6.355*np.sqrt(eps)+eps*(-1.708+6.882*np.sqrt(eps)))
def k(Projectile,Target):
'''
the same expression for k is used in both M84 and Y96.
'''
Z1=get_Atom_prop(Projectile,'Z')#Atoms[1][Atoms[0].index(Projectile)]
Z2=get_Atom_prop(Target,'Z')#Atoms[1][Atoms[0].index(Target)]
M1=get_Atom_prop(Projectile,'M')#Atoms[2][Atoms[0].index(Projectile)]
M2=get_Atom_prop(Target,'M')#Atoms[2][Atoms[0].index(Target)]
return 0.079*((M1+M2)**(3./2))/((M1**(3./2)*M2**(.5)))*((Z1**(2./3)*Z2**(.5))/((Z1**(2./3)+Z2**(2./3))**(3./4)))
def reduced_elec_stopping_x_section(Projectile,Target,Energy,sputter_model):
'''
based on equ A2 and A3
    calculates reduced_energy(Projectile,Target,Energy) first
'''
return k(Projectile,Target)*np.sqrt(reduced_energy(Projectile,Target,Energy,sputter_model))
def Sn(Projectile,Target,Energy,sputter_model,nuc_x_section_model):
'''
    Calculates Sn; needs the model for the nuclear stopping to calculate the reduced stopping power
'''
Z1=get_Atom_prop(Projectile,'Z')
Z2=get_Atom_prop(Target,'Z')
M1=get_Atom_prop(Projectile,'M')
M2=get_Atom_prop(Target,'M')
if sputter_model=="Sigmund":
a=0.469209/((Z1**(2.0/3.0)+Z2**(2.0/3.0))**(1.0/2.0))
return (18.081264*Z1*Z2*M1*reduced_nuc_stopping_x_section(Projectile,Target,Energy,sputter_model,nuc_x_section_model)*a)/(M1+M2)
else:
# We end up here if we use either M84 or Y96
# the first parameter is 84 in Y96 instead of 8 in M84 due to different units used. This is corrected by also changing the Y(E) from 0.042 (Y96) to 0.42 (M84)
return 8.478*Z1*Z2/(Z1**(2./3)+Z2**(2./3))**0.5*M1/(M1+M2)*reduced_nuc_stopping_x_section(Projectile,Target,Energy,sputter_model,nuc_x_section_model)
def alpha_stern(Projectile,Target,sputter):
'''
based on equ 4
'''
M1=get_Atom_prop(Projectile,'M')
M2=get_Atom_prop(Target,'M')
if sputter == "M84":
return 0.08+0.164*(M2/M1)**0.4+0.0145*(M2/M1)**(1.29)
elif sputter == "Y96":
if M1 <= M2:
            # M1 <= M2, which will be the case nearly all the time in our situation
return 0.249*(M2/M1)**0.56+0.0035*(M2/M1)**1.5
else:
# obviously now M1 is larger than M2
return 0.0875*(M2/M1)**-0.15+0.165*(M2/M1)
else:
# seems we need to do Sigmund
X=M2/M1
return 0.1694+(0.04218*X)+(0.0518*(X**2))-(0.00926*(X**3))+(0.00049*(X**4))
def eth(Projectile,Target,sputter):
'''
    based on equ. 5, but also needs tabulated values of Us
    (User_SBE == -1 means: use the tabulated surface binding energy)
'''
if User_SBE==-1:
Us=get_Atom_prop(Target,'SBE')#Atoms[3][Atoms[0].index(Target)]
else:
Us=User_SBE
M1=get_Atom_prop(Projectile,'M')
M2=get_Atom_prop(Target,'M')
if sputter == "M84":
return Us*(1.9+3.8*(M1/M2)+0.134*(M2/M1)**1.24)
elif sputter == "Y96":
gamma=4*M1*M2/(M1+M2)**2
if M1 <= M2:
return Us*(1.0+5.7*(M1/M2))/gamma
else:
return Us*6.7/gamma
else:
        # It seems we are using Sigmund. Nothing to do, but we return Us nevertheless.
return Us
def Gamma(Projectile,Target):
'''
from equ. 16 in Y96
'''
return get_Atom_prop(Target,'W')/(1.0+(get_Atom_prop(Projectile,'M')/7.0)**3)
def yE(Projectile,Target,Energy,sputter,model):
'''
Equ. 1
    The first three parameters are straightforward. `sputter` switches between
    different models for the sputter-yield calculation: if it is M84 the
    calculation is based on that paper, while Y96 uses the newer version.
    `model` selects the nuclear stopping cross section; see
    reduced_nuc_stopping_x_section() for details.
'''
if User_SBE==-1:
Us=get_Atom_prop(Target,'SBE')#Atoms[3][Atoms[0].index(Target)]
else:
Us=User_SBE
Q84=get_Atom_prop(Target,'Q84')
Q96=get_Atom_prop(Target,'Q96')
s=get_Atom_prop(Target,'s')#Atoms[7][Atoms[0].index(Target)]
if s==0:
s=2.65
if sputter=="M84":
if Energy>eth(Projectile,Target,sputter): # Otherwise we try to get the sqrt of a negative number
return (0.42*(alpha_stern(Projectile,Target,sputter)*Q84*Sn(Projectile,Target,Energy,sputter,model))/(Us*(1+0.35*Us*reduced_elec_stopping_x_section(Projectile,Target,Energy,sputter)))*(1-(eth(Projectile,Target,sputter)/Energy)**(0.5))**(2.8))
else:
return 0
elif sputter=="Y96":
if Energy>eth(Projectile,Target,sputter): # Otherwise we try to get the sqrt of a negative number
return ((0.42*Q96*alpha_stern(Projectile,Target,sputter)*Sn(Projectile,Target,Energy,sputter,model))/(Us*(1.0+(Gamma(Projectile,Target)*k(Projectile,Target)*reduced_energy(Projectile,Target,Energy,sputter)**0.3)))*((1-np.sqrt(eth(Projectile,Target,sputter)/Energy))**s))
else:
return 0
elif sputter=="Sigmund":
if Energy>Us: # Otherwise we try to get the sqrt of a negative number
return 0.42*alpha_stern(Projectile,Target,sputter)*Sn(Projectile,Target,Energy,sputter,model)/Us
else:
return 0
else:
print("No or invalid model selected. Can only be M84 or Y96.")
return 0
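# Hedged usage sketch (assumes the Atom_props table and the User_SBE global
# are defined as in the setup above; 'He' on 'Si' is purely illustrative).
energies = np.logspace(1, 4, 50)   # 10 eV to 10 keV
yields = [yE('He', 'Si', E, 'Y96', 'KrC') for E in energies]
plt.loglog(energies, yields)
plt.xlabel('Ion energy (eV)')
plt.ylabel('Sputter yield Y(E)')
plt.show()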
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This defines the location of a file that holds all the atomic data we need. The file holds values for the calculation of the sputter yield according to Sigmund as well as Matsunami1984 and Yamamura1996. The latter two modify the calculation in a way that makes it more suitable for light ions, i.e. He and Ne.
Step2: All the different values will be computed in functions so we can call them at any time, and reuse them for making graphs and similar stuff
Step3: Reduced nuclear stopping cross section $s_n$
Step4: Reduced electronic stopping cross section $s_e=k\sqrt{\epsilon}$
Step5: Nuclear stopping $S_n=Ks_n$
Step6: Dimensionless parameter $\alpha^*$
Step7: Empirical parameters
Step8: $\Gamma$
Step9: Sputteryield
|
6,271
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
macrodata = sm.datasets.macrodata.load_pandas().data
macrodata.index = pd.period_range('1959Q1', '2009Q3', freq='Q')
endog = macrodata['infl']
endog.plot(figsize=(15, 5))
# Construct the model
mod = sm.tsa.SARIMAX(endog, order=(1, 0, 0), trend='c')
# Estimate the parameters
res = mod.fit()
print(res.summary())
# The default is to get a one-step-ahead forecast:
print(res.forecast())
# Here we construct a more complete results object.
fcast_res1 = res.get_forecast()
# Most results are collected in the `summary_frame` attribute.
# Here we specify that we want a confidence level of 90%
print(fcast_res1.summary_frame(alpha=0.10))
print(res.forecast(steps=2))
fcast_res2 = res.get_forecast(steps=2)
# Note: since we did not specify the alpha parameter, the
# confidence level is at the default, 95%
print(fcast_res2.summary_frame())
print(res.forecast('2010Q2'))
fcast_res3 = res.get_forecast('2010Q2')
print(fcast_res3.summary_frame())
fig, ax = plt.subplots(figsize=(15, 5))
# Plot the data (here we are subsetting it to get a better look at the forecasts)
endog.loc['1999':].plot(ax=ax)
# Construct the forecasts
fcast = res.get_forecast('2011Q4').summary_frame()
fcast['mean'].plot(ax=ax, style='k--')
ax.fill_between(fcast.index, fcast['mean_ci_lower'], fcast['mean_ci_upper'], color='k', alpha=0.1);
# Step 1: fit model parameters w/ training sample
training_obs = int(len(endog) * 0.8)
training_endog = endog[:training_obs]
training_mod = sm.tsa.SARIMAX(
training_endog, order=(1, 0, 0), trend='c')
training_res = training_mod.fit()
# Print the estimated parameters
print(training_res.params)
# Step 2: produce one-step-ahead forecasts
fcast = training_res.forecast()
# Step 3: compute root mean square forecasting error
true = endog.reindex(fcast.index)
error = true - fcast
# Print out the results
print(pd.concat([true.rename('true'),
fcast.rename('forecast'),
error.rename('error')], axis=1))
# Step 1: append a new observation to the sample and refit the parameters
append_res = training_res.append(endog[training_obs:training_obs + 1], refit=True)
# Print the re-estimated parameters
print(append_res.params)
# Step 2: produce one-step-ahead forecasts
fcast = append_res.forecast()
# Step 3: compute root mean square forecasting error
true = endog.reindex(fcast.index)
error = true - fcast
# Print out the results
print(pd.concat([true.rename('true'),
fcast.rename('forecast'),
error.rename('error')], axis=1))
# Setup forecasts
nforecasts = 3
forecasts = {}
# Get the number of initial training observations
nobs = len(endog)
n_init_training = int(nobs * 0.8)
# Create model for initial training sample, fit parameters
init_training_endog = endog.iloc[:n_init_training]
mod = sm.tsa.SARIMAX(init_training_endog, order=(1, 0, 0), trend='c')
res = mod.fit()
# Save initial forecast
forecasts[init_training_endog.index[-1]] = res.forecast(steps=nforecasts)
# Step through the rest of the sample
for t in range(n_init_training, nobs):
# Update the results by appending the next observation
updated_endog = endog.iloc[t:t+1]
res = res.append(updated_endog, refit=False)
# Save the new set of forecasts
forecasts[updated_endog.index[0]] = res.forecast(steps=nforecasts)
# Combine all forecasts into a dataframe
forecasts = pd.concat(forecasts, axis=1)
print(forecasts.iloc[:5, :5])
# Construct the forecast errors
forecast_errors = forecasts.apply(lambda column: endog - column).reindex(forecasts.index)
print(forecast_errors.iloc[:5, :5])
# Reindex the forecasts by horizon rather than by date
def flatten(column):
return column.dropna().reset_index(drop=True)
flattened = forecast_errors.apply(flatten)
flattened.index = (flattened.index + 1).rename('horizon')
print(flattened.iloc[:3, :5])
# Compute the root mean square error
rmse = (flattened**2).mean(axis=1)**0.5
print(rmse)
# Setup forecasts
nforecasts = 3
forecasts = {}
# Get the number of initial training observations
nobs = len(endog)
n_init_training = int(nobs * 0.8)
# Create model for initial training sample, fit parameters
init_training_endog = endog.iloc[:n_init_training]
mod = sm.tsa.SARIMAX(init_training_endog, order=(1, 0, 0), trend='c')
res = mod.fit()
# Save initial forecast
forecasts[init_training_endog.index[-1]] = res.forecast(steps=nforecasts)
# Step through the rest of the sample
for t in range(n_init_training, nobs):
# Update the results by appending the next observation
updated_endog = endog.iloc[t:t+1]
res = res.extend(updated_endog)
# Save the new set of forecasts
forecasts[updated_endog.index[0]] = res.forecast(steps=nforecasts)
# Combine all forecasts into a dataframe
forecasts = pd.concat(forecasts, axis=1)
print(forecasts.iloc[:5, :5])
# Construct the forecast errors
forecast_errors = forecasts.apply(lambda column: endog - column).reindex(forecasts.index)
print(forecast_errors.iloc[:5, :5])
# Reindex the forecasts by horizon rather than by date
def flatten(column):
return column.dropna().reset_index(drop=True)
flattened = forecast_errors.apply(flatten)
flattened.index = (flattened.index + 1).rename('horizon')
print(flattened.iloc[:3, :5])
# Compute the root mean square error
rmse = (flattened**2).mean(axis=1)**0.5
print(rmse)
print(endog.index)
# Annual frequency, using a PeriodIndex
index = pd.period_range(start='2000', periods=4, freq='A')
endog1 = pd.Series([1, 2, 3, 4], index=index)
print(endog1.index)
# Quarterly frequency, using a DatetimeIndex
index = pd.date_range(start='2000', periods=4, freq='QS')
endog2 = pd.Series([1, 2, 3, 4], index=index)
print(endog2.index)
# Monthly frequency, using a DatetimeIndex
index = pd.date_range(start='2000', periods=4, freq='M')
endog3 = pd.Series([1, 2, 3, 4], index=index)
print(endog3.index)
index = pd.DatetimeIndex([
'2000-01-01 10:08am', '2000-01-01 11:32am',
'2000-01-01 5:32pm', '2000-01-02 6:15am'])
endog4 = pd.Series([0.2, 0.5, -0.1, 0.1], index=index)
print(endog4.index)
mod = sm.tsa.SARIMAX(endog4)
res = mod.fit()
res.forecast(1)
# Here we'll catch the exception to prevent printing too much of
# the exception trace output in this notebook
try:
res.forecast('2000-01-03')
except KeyError as e:
print(e)
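# Hedged illustration (not part of the original example): one way to recover
# date-based forecasting is to put the observations on a regular grid first;
# the hourly resampling rule 'H' here is an assumption.
endog4_regular = endog4.resample('H').mean().interpolate()
mod_regular = sm.tsa.SARIMAX(endog4_regular)
res_regular = mod_regular.fit()
print(res_regular.forecast('2000-01-02 12:00'))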
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic example
Step2: Constructing and estimating the model
Step3: Forecasting
Step4: The get_forecast method is more general, and also allows constructing confidence intervals.
Step5: The default confidence level is 95%, but this can be controlled by setting the alpha parameter, where the confidence level is defined as $(1 - \alpha) \times 100\%$. In the example above, we specified a confidence level of 90%, using alpha=0.10.
Step6: However, if your data included a Pandas index with a defined frequency (see the section at the end on Indexes for more information), then you can alternatively specify the date through which you want forecasts to be produced
Step7: Plotting the data, forecasts, and confidence intervals
Step8: Note on what to expect from forecasts
Step9: To add on another observation, we can use the append or extend results methods. Either method can produce the same forecasts, but they differ in the other results that are available
Step10: Notice that these estimated parameters are slightly different than those we originally estimated. With the new results object, append_res, we can compute forecasts starting from one observation further than the previous call
Step11: Putting it all together, we can perform the recursive forecast evaluation exercise as follows
Step12: We now have a set of three forecasts made at each point in time from 1999Q2 through 2009Q3. We can construct the forecast errors by subtracting each forecast from the actual value of endog at that point.
Step13: To evaluate our forecasts, we often want to look at a summary value like the root mean square error. Here we can compute that for each horizon by first flattening the forecast errors so that they are indexed by horizon and then computing the root mean square error for each horizon.
Step14: Using extend
Step15: By not re-estimating the parameters, our forecasts are slightly worse (the root mean square error is higher at each horizon). However, the process is faster, even with only 200 datapoints. Using the %%timeit cell magic on the cells above, we found a runtime of 570ms using extend versus 1.7s using append with refit=True. (Note that using extend is also faster than using append with refit=False).
Step16: In most cases, if your data has an associated data/time index with a defined frequency (like quarterly, monthly, etc.), then it is best to make sure your data is a Pandas series with the appropriate index. Here are three examples of this
Step17: In fact, if your data has an associated date/time index, it is best to use that even if it does not have a defined frequency. An example of that kind of index is as follows - notice that it has freq=None
Step18: You can still pass this data to statsmodels' model classes, but you will get the following warning, that no frequency data was found
Step19: What this means is that you cannot specify forecasting steps by dates, and the output of the forecast and get_forecast methods will not have associated dates. The reason is that without a given frequency, there is no way to determine what date each forecast should be assigned to. In the example above, there is no pattern to the date/time stamps of the index, so there is no way to determine what the next date/time should be (should it be in the morning of 2000-01-02? the afternoon? or maybe not until 2000-01-03?).
Step20: The index associated with the new forecast is 4, because if the given data had an integer index, that would be the next value. A warning is given letting the user know that the index is not a date/time index.
|
6,272
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from pylab import *
%matplotlib inline
import os
import sys
#TODO: specify your caffe root folder here
caffe_root = "X:\caffe_siggraph/caffe-windows-master"
sys.path.insert(0, caffe_root+'/python')
import caffe
#TODO: change to your own network and deploying file
PRETRAINED_FILE = '../models/triplet_googlenet/triplet_googlenet_finegrain_final.caffemodel'
sketch_model = '../models/triplet_googlenet/googlenet_sketchdeploy.prototxt'
image_model = '../models/triplet_googlenet/googlenet_imagedeploy.prototxt'
caffe.set_mode_gpu()
#caffe.set_mode_cpu()
sketch_net = caffe.Net(sketch_model, PRETRAINED_FILE, caffe.TEST)
img_net = caffe.Net(image_model, PRETRAINED_FILE, caffe.TEST)
sketch_net.blobs.keys()
#TODO: set output layer name. You can use sketch_net.blobs.keys() to list all layers
output_layer_sketch = 'pool5/7x7_s1_s'
output_layer_image = 'pool5/7x7_s1_p'
#set the transformer
transformer = caffe.io.Transformer({'data': np.shape(sketch_net.blobs['data'].data)})
transformer.set_mean('data', np.array([104, 117, 123]))
transformer.set_transpose('data',(2,0,1))
transformer.set_channel_swap('data', (2,1,0))
transformer.set_raw_scale('data', 255.0)
#photo paths
photo_paths = 'C:\Users\Patsorn\Documents/notebook_backup/SBIR/photos/'
sketch_paths = 'C:\Users\Patsorn\Documents/notebook_backup/SBIR/sketches/'
#load up test images
with open('../list/test_img_list.txt','r') as my_file:
test_img_list = [c.rstrip() for c in my_file.readlines()]
#extract feature for all test images
feats = []
N = np.shape(test_img_list)[0]
for i,path in enumerate(test_img_list):
imgname = path.split('/')[-1]
imgname = imgname.split('.jpg')[0]
imgcat = path.split('/')[0]
print '\r',str(i+1)+'/'+str(N)+ ' '+'Extracting ' +path+'...',
full_path = photo_paths + path
img = (transformer.preprocess('data', caffe.io.load_image(full_path.rstrip())))
img_in = np.reshape([img],np.shape(sketch_net.blobs['data'].data))
out_img = img_net.forward(data=img_in)
out_img = np.copy(out_img[output_layer_image])
feats.append(out_img)
print 'done',
np.shape(feats)
feats = np.resize(feats,[np.shape(feats)[0],np.shape(feats)[2]]) # quick fix for size
#build nn pool
from sklearn.neighbors import NearestNeighbors,LSHForest
nbrs = NearestNeighbors(n_neighbors=np.size(feats,0), algorithm='brute',metric='cosine').fit(feats)
#compute score
num_query = 0
count_recall = [0]*1250
sum_rank = 0
sum_class_rank = [0]*125
count_recall_class = np.zeros((125,1250),np.float)
i_coco =-1
for i,img in enumerate(test_img_list):
imgname = img.split('/')[-1]
imgname = imgname.split('.jpg')[0]
imgcat = img.split('/')[0]
sketch_list = os.listdir(sketch_paths+imgcat)
sketch_img_list = [skg for skg in sketch_list if skg.startswith(imgname+'-') and skg.endswith('-5.png')]#change this skg.endswith('-1.png') to the variation you want
for sketch in sketch_img_list:
sketch_path = sketch_paths + imgcat+'/' + sketch
sketch_in = (transformer.preprocess('data', plt.imread(sketch_path)))
sketch_in = np.reshape([sketch_in],np.shape(sketch_net.blobs['data'].data))
query = sketch_net.forward(data=sketch_in)
query=np.copy(query[output_layer_sketch])
distances, indices = nbrs.kneighbors(np.reshape(query,[np.shape(query)[1]]))
num_query = num_query+1
print '\r','...'+sketch+'...',
for j,indice in enumerate(indices[0]):
if indice==i:
#this j is the right one.
count_recall[j] = count_recall[j]+1
print '\r','ranking: '+imgcat+ ' '+sketch + ' found at ' +str(j),
break
cum_count = [0]*1250
sumc = 0
for i,c in enumerate(count_recall):
sumc = sumc + c
cum_count[i] = sumc
print '\nRecall @K=1 = ', 1.00*cum_count[0]/cum_count[-1]
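# A few more cutoffs, reusing cum_count from above (not in the original cell).
for K in (1, 5, 10, 50):
    print('Recall @K=%d = %.4f' % (K, 1.0 * cum_count[K - 1] / cum_count[-1]))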
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: caffe
Step2: Now we can load up the network. You can change the path to your own network here. Make sure to use the matching deploy prototxt files and change the target layer to your layer name.
Step3: Sketchy test set
|
6,273
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
import numpy as np
b = phoebe.default_binary()
b.set_value('q', value=0.7)
b.set_value('incl', component='binary', value=87)
b.set_value('requiv', component='primary', value=0.8)
b.set_value('teff', component='secondary', value=6500)
b.set_value('syncpar', component='secondary', value=1.5)
anim_times = phoebe.arange(0.44, 0.56, 0.002)
b.add_dataset('rv',
times=phoebe.linspace(0,1,201),
dataset='dynamicalrvs')
b.set_value_all('rv_method', dataset='dynamicalrvs', value='dynamical')
b.add_dataset('rv',
times=phoebe.linspace(0,1,201),
dataset='numericalrvs')
b.set_value_all('rv_method', dataset='numericalrvs', value='flux-weighted')
b.add_dataset('mesh',
compute_times=anim_times,
coordinates='uvw',
columns=['rvs@numericalrvs'],
dataset='mesh01')
b.add_dataset('lp',
compute_times=anim_times,
component=['primary', 'secondary'],
wavelengths=phoebe.linspace(549.5,550.5,101),
profile_rest=550)
b.run_compute(irrad_method='none')
colors = {'primary': 'green', 'secondary': 'magenta'}
afig, mplfig = b.plot(kind='rv',
c=colors,
ls={'numericalrvs': 'solid', 'dynamicalrvs': 'dotted'},
show=True)
afig, mplfig= b.plot(time=0.46,
fc='rvs@numericalrvs', ec='face',
c=colors,
ls={'numericalrvs': 'solid', 'dynamicalrvs': 'dotted'},
highlight={'numericalrvs': True, 'dynamicalrvs': False},
axpos={'mesh': 211, 'rv': 223, 'lp': 224},
xlim={'rv': (0.4, 0.6)}, ylim={'rv': (-80, 80)},
tight_layout=True,
show=True)
afig, mplanim = b.plot(times=anim_times,
fc='rvs@numericalrvs', ec='face',
c=colors,
ls={'numericalrvs': 'solid', 'dynamicalrvs': 'dotted'},
highlight={'numericalrvs': True, 'dynamicalrvs': False},
pad_aspect=False,
axpos={'mesh': 211, 'rv': 223, 'lp': 224},
xlim={'rv': (0.4, 0.6)}, ylim={'rv': (-80, 80)},
animate=True,
save='rossiter_mclaughlin.gif',
save_kwargs={'writer': 'imagemagick'})
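# Hedged sketch (not part of the original tutorial): quantify the
# Rossiter-McLaughlin anomaly as the difference between the flux-weighted and
# dynamical RVs of the primary; the twigs assume the datasets defined above.
rv_num = b.get_value(qualifier='rvs', component='primary',
                     dataset='numericalrvs', context='model')
rv_dyn = b.get_value(qualifier='rvs', component='primary',
                     dataset='dynamicalrvs', context='model')
print('max |RM anomaly| = {:.2f} km/s'.format(np.max(np.abs(rv_num - rv_dyn))))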
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new bundle.
Step2: Now we'll try to exaggerate the effect by spinning up the secondary component.
Step3: Adding Datasets
Step4: We'll add two identical datasets, one where we compute only dynamical RVs (won't include Rossiter-McLaughlin) and another where we compute flux-weighted RVs (will include Rossiter-McLaughlin).
Step5: For the mesh, we'll save some time by only exposing plane-of-sky coordinates and the 'rvs' column.
Step6: And for the line-profile, we'll expose the line-profile for both of our stars separately, instead of for the entire system.
Step7: Running Compute
Step8: Plotting
Step9: First let's compare between the dynamical and numerical RVs.
Step10: Now let's make a plot of the line profiles and mesh during ingress to visualize what's happening.
Step11: Here we can see that the star in front (green) is eclipsing more of the blue-shifted part of the back star (magenta), distorting the line profile, causing the apparent center of the line profile to be shifted to the right/red, and therefore the radial velocities to be artificially increased as compared to the dynamical RVs.
|
6,274
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Grab newest JAX version.
!pip install --upgrade -q jax==0.1.57 jaxlib==0.1.37
# Make sure the Colab Runtime is set to Accelerator: TPU.
import requests
import os
if 'TPU_DRIVER_MODE' not in globals():
url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206'
resp = requests.post(url)
TPU_DRIVER_MODE = 1
# The following is required to use TPU Driver as JAX's backend.
from jax.config import config
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
print(config.FLAGS.jax_backend_target)
!pip install --upgrade -q sentencepiece
!pip install --upgrade -q gin git+https://github.com/google/trax.git@v1.2.0
from tensorflow.compat.v1.io.gfile import GFile
import gin
import os
import jax
import trax
from trax.supervised import inputs
import numpy as onp
import jax.numpy as np
from scipy.special import softmax
from sentencepiece import SentencePieceProcessor
# Import a copy of "Crime and Punishment", by Fyodor Dostoevsky
with GFile('gs://trax-ml/reformer/crime-and-punishment-2554.txt') as f:
text = f.read()
# The file read above includes metadata and licensing information.
# For training our language model, we will only use the actual novel text.
start = text.find('CRIME AND PUNISHMENT') # skip header
start = text.find('CRIME AND PUNISHMENT', start + 1) # skip header
start = text.find('CRIME AND PUNISHMENT', start + 1) # skip translator preface
end = text.rfind('End of Project') # skip extra text at the end
text = text[start:end].strip()
# Load a BPE vocabulary with 320 types. This mostly consists of single letters
# and pairs of letters, but it has some common words and word pieces, too.
!gsutil cp gs://trax-ml/reformer/cp.320.* .
TOKENIZER = SentencePieceProcessor()
TOKENIZER.load('cp.320.model')
# Tokenize
IDS = TOKENIZER.EncodeAsIds(text)
IDS = onp.asarray(IDS, dtype=onp.int32)
PAD_AMOUNT = 512 * 1024 - len(IDS)
print("Number of tokens:", IDS.shape[0])
# Set up the data pipeline.
def my_inputs(n_devices):
while True:
inputs = []
mask = []
pad_amounts = onp.random.choice(PAD_AMOUNT, n_devices)
for i in range(n_devices):
inputs.append(onp.pad(IDS, (pad_amounts[i], PAD_AMOUNT - pad_amounts[i]),
mode='constant'))
mask.append(onp.pad(onp.ones_like(IDS, dtype=onp.float32),
(pad_amounts[i], PAD_AMOUNT - pad_amounts[i]),
mode='constant'))
inputs = onp.stack(inputs)
mask = onp.stack(mask)
yield (inputs, inputs, mask)
print("(device count, tokens per device) = ",
next(my_inputs(trax.math.device_count()))[0].shape)
# Configure hyperparameters.
gin.parse_config("""
import trax.layers
import trax.models
import trax.optimizers
import trax.supervised.inputs
import trax.supervised.trainer_lib
# Parameters that will vary between experiments:
# ==============================================================================
train.model = @trax.models.ReformerLM
# Our model will have 6 layers, alternating between the LSH attention proposed
# in the Reformer paper and local attention within a certain context window.
n_layers = 6
attn_type = [
@TimeBinCausalAttention,
@LSHCausalAttention,
@TimeBinCausalAttention,
@LSHCausalAttention,
@TimeBinCausalAttention,
@LSHCausalAttention,
]
share_qk = False # LSHCausalAttention ignores this flag and always shares q & k
n_heads = 2
attn_kv = 64
dropout = 0.05
n_tokens = 524288
# Parameters for MultifactorSchedule:
# ==============================================================================
MultifactorSchedule.constant = 0.01
MultifactorSchedule.factors = 'constant * linear_warmup * cosine_decay'
MultifactorSchedule.warmup_steps = 100
MultifactorSchedule.steps_per_cycle = 900
# Parameters for Adam:
# ==============================================================================
Adam.weight_decay_rate=0.0
Adam.b1 = 0.86
Adam.b2 = 0.92
Adam.eps = 1e-9
# Parameters for TimeBinCausalAttention:
# ==============================================================================
TimeBinCausalAttention.bin_length = 64
TimeBinCausalAttention.dropout = 0.05
TimeBinCausalAttention.n_bins = None
TimeBinCausalAttention.share_qk = %share_qk
# Parameters for LSHCausalAttention:
# ==============================================================================
LSHCausalAttention.allow_duplicate_attention = False
LSHCausalAttention.attend_across_buckets = True
LSHCausalAttention.rehash_each_round = True
LSHCausalAttention.data_rotation = False
LSHCausalAttention.n_bins = 4096
LSHCausalAttention.n_buckets = 8192
LSHCausalAttention.factorize_hash = [64, 128]
LSHCausalAttention.n_hashes = 1
LSHCausalAttention.one_rng = False
LSHCausalAttention.hard_k = 0
LSHCausalAttention.dropout = 0.0
LSHCausalAttention.drop_for_hash_rate = 0.0
LSHCausalAttention.max_len_for_inference = 2048
LSHCausalAttention.bucket_capacity_for_inference = 64
# Parameters for ReformerLM:
# ==============================================================================
ReformerLM.attention_type = %attn_type
ReformerLM.d_attention_key = %attn_kv
ReformerLM.d_attention_value = %attn_kv
ReformerLM.d_model = 256
ReformerLM.d_ff = 512
ReformerLM.dropout = %dropout
ReformerLM.ff_activation = @trax.layers.Relu
ReformerLM.max_len = %n_tokens
ReformerLM.mode = 'train'
ReformerLM.n_heads = %n_heads
ReformerLM.n_layers = %n_layers
ReformerLM.vocab_size = 320
ReformerLM.share_qk = %share_qk
ReformerLM.axial_pos_shape = (512, 1024)
ReformerLM.d_axial_pos_embs= (64, 192)
""")
# Set up a Trainer.
output_dir = os.path.expanduser('~/train_dir/')
!rm -f ~/train_dir/model.pkl # Remove old model
trainer = trax.supervised.Trainer(
model=trax.models.ReformerLM,
loss_fn=trax.layers.CrossEntropyLoss,
optimizer=trax.optimizers.Adam,
lr_schedule=trax.lr.MultifactorSchedule,
inputs=trax.supervised.inputs.Inputs(my_inputs),
output_dir=output_dir,
has_weights=True)
# Run one training step, to make sure the model fits in memory.
# The first time trainer.train_epoch is called, it will JIT the entire network
# architecture, which takes around 2 minutes. The JIT-compiled model is saved
# so subsequent runs will be much faster than the first.
trainer.train_epoch(n_steps=1, n_eval_steps=1)
# Train for 600 steps total
# The first ~20 steps are slow to run, but after that it reaches steady-state
# speed. This will take at least 30 minutes to run to completion, but can safely
# be interrupted by selecting "Runtime > Interrupt Execution" from the menu.
# The language model won't be exceptionally good when trained for just a few
# steps and with minimal regularization. However, we can still sample from it to
# see what it learns.
trainer.train_epoch(n_steps=9, n_eval_steps=1)
for _ in range(59):
trainer.train_epoch(n_steps=10, n_eval_steps=1)
# As we report in the Reformer paper, increasing the number of hashing rounds
# helps with quality. We can even increase the number of hashing rounds at
# evaluation time only.
gin.parse_config("LSHCausalAttention.n_hashes = 4")
model_infer = trax.models.ReformerLM(mode='predict')
# Prepare a jitted copy of the model.
jit_model_infer = trax.layers.base._accelerate(
model_infer._forward_internal, trax.math.device_count())
# Set up the initial state for sampling.
infer_state = model_infer.new_weights_and_state(
trax.supervised.trainer_lib.ShapeDtype((1,1), dtype=np.int32))[1]
infer_state = trainer._for_n_devices(infer_state)
def sample(length=2048, prompt=None):
  # Sample from the ReformerLM model.
model_weights = trainer._opt_state[0][0]
# Token id 0 is the equivalent of a "start" token
cur_inputs = np.zeros((trax.math.device_count(), 1, 1), dtype=np.int32)
cur_state = infer_state
rngs = trax.math.random.split(trax.math.random.get_prng(0), trax.math.device_count())
all_samples = []
if prompt is not None:
prompt = np.asarray(
[TOKENIZER.EncodeAsIds(prompt)] * trax.math.device_count())
for iteration in range(length):
logits, cur_state = jit_model_infer(
cur_inputs,
model_weights,
cur_state,
rngs)
if prompt is not None and iteration < prompt.shape[1]:
cur_samples = onp.array(prompt[:, iteration], dtype=int)
else:
logits = onp.array(logits)[:,0,0,:]
probs = onp.exp(logits)
cur_samples = [onp.random.choice(probs.shape[-1], p=probs[i,:])
for i in range(probs.shape[0])]
cur_samples = onp.array(cur_samples, dtype=int)
all_samples.append(cur_samples)
cur_inputs = np.array(cur_samples[:,None,None])
all_samples = onp.stack(all_samples, -1)
return all_samples
# Sample from the Reformer language model, given a prefix.
samples = sample(length=128, prompt="There was a time when")
for ids in samples:
print(TOKENIZER.DecodeIds(ids.tolist()))
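# Unconditional samples (no prompt), reusing the sample() helper above.
unconditional = sample(length=128)
for ids in unconditional:
    print(TOKENIZER.DecodeIds(ids.tolist()))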
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reformer
Step2: Setting up data and model
Step4: As we see above, "Crime and Punishment" has just over half a million tokens with the BPE vocabulary we have selected.
Step7: Sample from the model
|
6,275
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
np.random.seed(10)
A = tf.constant(np.random.randint(low=0, high=5, size=(10, 20, 30)))
B = tf.constant(np.random.randint(low=0, high=5, size=(10, 20, 30)))
import numpy as np
def g(A, B):
    return tf.constant(np.einsum('ikm,jkm->ijk', A, B))
result = g(A.__copy__(),B.__copy__())
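# Sanity check (assumes TF2 eager execution): the same contraction via
# tf.einsum, cast to float to stay on well-supported kernels.
expected = tf.einsum('ikm,jkm->ijk',
                     tf.cast(A, tf.float64), tf.cast(B, tf.float64))
assert np.allclose(result.numpy(), expected.numpy())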
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
6,276
|
<ASSISTANT_TASK:>
Python Code:
import datetime
from functools import reduce
import json
import os
import numpy as np
import pandas as pd
from planet import api
import rasterio
from sklearn.cluster import MiniBatchKMeans
from sklearn.ensemble import RandomForestClassifier
from utils import Timer
import visual
# Import functionality from local notebooks
from ipynb.fs.defs.drc_roads_download import get_overlapping_scenes, StripDownloader, StripSearcher
from ipynb.fs.defs.drc_roads_classification \
import load_training_data, classify_forest, classified_band_to_rgb
# load aoi. Saved in drc_roads_download notebook
aoi_geojson_filename = os.path.join('pre-data', 'aoi.geojson')
# create planet api client
planet_api_key = os.environ['PL_API_KEY']
# quick check that key is defined
assert planet_api_key, "PL_API_KEY not defined."
client = api.ClientV1(api_key=planet_api_key)
# load aoi json
with open(aoi_geojson_filename) as json_data:
aoi = json.load(json_data)
# get good overlapping scenes from to July 2017 to February 2018
begin=datetime.datetime(year=2017,month=7,day=1)
end=datetime.datetime(year=2018,month=3,day=1)
strip_searcher = StripSearcher(aoi, begin, end, client)
good_scenes = strip_searcher.search(aoi_geojson_filename)
asset_type = 'analytic'
strip_downloader = StripDownloader(good_scenes, aoi_geojson_filename, client)
strip_downloader.run(asset_type, overwrite=False, verbose=True)
# sort strip ids as integers. Strip id order is the same as strip temporal order
strip_ids = [int(s) for s in good_scenes.strip_id.unique().tolist()]
strip_ids.sort()
strip_ids = [str(s) for s in strip_ids]
strip_mosaics = [strip_downloader.get_filename(i, asset_type) for i in strip_ids]
# strip_mosaics
classified_bands_file = os.path.join('data', 'classified_bands.npz')
def save_to_cache(classified_bands, strip_ids):
save_bands = dict((s, classified_bands[s])
for s in strip_ids)
# masked arrays are saved as just arrays, so save mask for later
save_bands.update(dict((s+'_msk', classified_bands[s].mask)
for s in strip_ids))
np.savez_compressed(classified_bands_file, **save_bands)
def load_from_cache():
tmp_cls = np.load(classified_bands_file)
sids = [k for k in tmp_cls.keys() if not k.endswith('_msk')]
# reform masked array from saved array and saved mask
classified_bands = dict((s, np.ma.array(tmp_cls[s], mask=tmp_cls[s+'_msk']))
for s in sids)
return classified_bands
use_cache = True
if use_cache and os.path.isfile(classified_bands_file):
print('using cached classified bands')
classified_bands = load_from_cache()
else:
X_training, y_training = load_training_data()
with Timer():
def classify(sid):
img = strip_downloader.get_filename(sid, asset_type)
# we only have two values, 0 and 1. Convert to uint8 for memory
band = (classify_forest(img, X_training, y_training)).astype(np.uint8)
return band
classified_bands = dict((s, classify(s)) for s in strip_ids)
# save to cache
save_to_cache(classified_bands, strip_ids)
# Decimate classified arrays for memory conservation
def decimate(arry, num=8):
return arry[::num, ::num].copy()
do_visualize = True # set to True to view images
if do_visualize:
for strip_id, classified_band in classified_bands.items():
visual.plot_image(classified_band_to_rgb(decimate(classified_band)),
title='Classified Image ({})'.format(strip_id),
figsize=(8, 8))
def get_bands(classified_bands):
return [classified_bands[k] for k in classified_bands.keys()]
def perc_masked(mask):
return 100*(np.count_nonzero(mask))/mask.size
def num_valid(mask):
return (mask.size - np.count_nonzero(mask))
def combine_masks(classified_bands):
masks = [b.mask for b in get_bands(classified_bands)]
total_mask = reduce(np.logical_or, masks)
print('{} valid pixels ({}% masked)'.format(num_valid(total_mask),
round(perc_masked(total_mask), 2)))
return total_mask
def valid_pixels(band, mask):
masked_band = np.ma.array(band, mask=mask)
return masked_band.compressed()
def bands_to_X(classified_bands, mask):
# X shape: (num_pixels, num_bands)
all_valid_pixels = [valid_pixels(b, mask) for b in get_bands(classified_bands)]
X = np.swapaxes(np.stack(all_valid_pixels), 0, 1)
return X
# lets just work with pixels that have a data point in every image
total_mask = combine_masks(classified_bands)
X = bands_to_X(classified_bands, total_mask)
def classify_kmeans(X):
mbk = MiniBatchKMeans(n_clusters=2)
with Timer():
mbk.fit(X)
with Timer():
y_pred = mbk.predict(X)
return y_pred
y_pred = classify_kmeans(X)
def y_to_band(y, mask):
band = np.ma.array(np.zeros(mask.shape), mask=mask)
band[~band.mask] = y
return band
y_band = y_to_band(y_pred, total_mask)
print(y_band.shape)
print(y_band.size - np.count_nonzero(y_band.mask))
print(np.count_nonzero(y_band))
# separate the class numbers into rgb bands
def classified_band_to_rgb(classified_band):
return [classified_band == n for n in [0,1,2]]
visual.plot_image(classified_band_to_rgb(y_band), title='KMeans Classified Image', figsize=(15, 15))
label_band = abs(classified_bands['1130991'] - classified_bands['739199'])
visual.plot_image(classified_band_to_rgb(decimate(label_band)),
title='Classified Difference Image',
figsize=(8, 8))
# set nodata to zero, move class values to 1 and 2
label_band_array = (label_band + 1).filled(0)
# get source crs
with rasterio.open(strip_mosaics[0], 'r') as src:
src_crs = src.crs
src_transform = src.transform
src_crs
src_transform
label_img = os.path.join('pre-data', 'difference.tif')
(height, width) = label_band_array.shape
profile = {
'width': width,
'height': height,
'driver': 'GTiff',
'count': 1,
'compress': 'lzw',
'dtype': rasterio.uint8,
'crs': src_crs,
'transform': src_transform
}
with rasterio.open(label_img, 'w', **profile) as dst:
# multiply by 100 for visibility
dst.write(100 * label_band_array.astype(rasterio.uint8), 1)
# uncomment to get metadata for label_img
# !gdalinfo $label_img
change_img = os.path.join('pre-data', 'difference_change.tif')
nochange_img = os.path.join('pre-data', 'difference_nochange.tif')
def get_label_mask(image_filename):
with rasterio.open(image_filename, 'r') as src:
band = src.read(1)
label_data = band == 255 # valid data in white regions
label_mask = ~label_data # mask True (masked) for not valid data
return label_mask
def get_unmasked_count(mask):
return np.size(mask) - np.count_nonzero(mask)
change_mask = get_label_mask(change_img)
print(get_unmasked_count(change_mask))
nochange_mask = get_label_mask(nochange_img)
print(get_unmasked_count(nochange_mask))
# combine the label masks with the valid data mask and then create X dataset for each label
total_mask = combine_masks(classified_bands)
total_change_mask = np.logical_or(total_mask, change_mask)
print('{} valid pixels ({}% masked)'.format(num_valid(total_change_mask),
round(perc_masked(total_change_mask), 2)))
X_change = bands_to_X(classified_bands, total_change_mask)
total_nochange_mask = np.logical_or(total_mask, nochange_mask)
print('{} valid pixels ({}% masked)'.format(num_valid(total_nochange_mask),
round(perc_masked(total_nochange_mask), 2)))
X_nochange = bands_to_X(classified_bands, total_nochange_mask)
def sample_pixels(pixels, sample_size):
if pixels.shape[0] > sample_size:
pixel_sample = pixels.copy()
np.random.shuffle(pixel_sample)
pixel_sample = pixel_sample[:sample_size]
else:
pixel_sample = pixels
return pixel_sample
def make_same_size_samples(list_of_X, size_percent=100):
# X: array of dim (num_pixels, num_classified_bands)
sample_len = min([p.shape[0] for p in list_of_X])
# only return size_percent number of pixels in the sample
if size_percent < 100 and size_percent > 0:
sample_len = round(sample_len * size_percent/100.0)
return [sample_pixels(p, sample_len) for p in list_of_X]
[X_change_sample, X_nochange_sample] = \
make_same_size_samples([X_change, X_nochange], size_percent=10)
print(X_change_sample.shape)
print(X_nochange_sample.shape)
change_label_value = 0
nochange_label_value = 1
X_rf = np.concatenate((X_change_sample, X_nochange_sample), axis=0)
y_rf = np.array(X_change_sample.shape[0] * [change_label_value] + \
X_nochange_sample.shape[0] * [nochange_label_value])
print(X_rf.shape)
print(y_rf.shape)
def classify_change(classified_bands, mask, X_training, y_training):
clf = RandomForestClassifier()
with Timer():
clf.fit(X_training, y_training)
X = bands_to_X(classified_bands, total_mask)
with Timer():
y_pred = clf.predict(X)
y_band = y_to_band(y_pred, total_mask)
return y_band
with Timer():
y_band_rf = classify_change(classified_bands, total_mask, X_rf, y_rf)
visual.plot_image(classified_band_to_rgb(y_band_rf), title='RF Classified Image', figsize=(25, 25))
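# Hedged summary (not part of the original analysis): fraction of valid
# pixels the random forest labels as change (change_label_value == 0 above).
valid = y_band_rf.compressed()
change_fraction = float(np.count_nonzero(valid == change_label_value)) / valid.size
print('Fraction of valid pixels classified as change: {:.2%}'.format(change_fraction))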
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download Scenes
Step2: Download portions of OrthoTile strips that overlap AOI
Step3: Get mosaic image names
Step4: Classify Scenes
Step5: Visualize Classified Scenes
Step6: The results of classification in these images are mixed. There are a few images where the UDM fails to identify clouds (e.g. 1114901) and a few images where the results actually look pretty good but classification misfires and classifies everything as non-forest (e.g. 1091588). There may be uniform haze in the imagery that makes it brighter than usual, which causes the classifier to fail. Ultimately, visual inspection finds that about 8 of the 21 classified images look informative.
Step7: Ok. We can kind of see roads here, but the new roads (aka the change) aren't clearly identified. Let's try supervised classification.
Step8: Save label image with metadata from the original image
Step9: Create Label Masks from Label Images
Step10: Get Features from Labels
Step11: Classify
|
6,277
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (16.0, 8.0)
df = pandas.read_csv('./stroopdata.csv')
df.describe()
df.hist()
import math
df['differences'] = df['Incongruent']-df['Congruent']
N =df['differences'].count()
print "Sample size:\t\t%d"% N
print "DoF:\t\t\t%d"%(df['differences'].count()-1)
mean = df['differences'].mean()
std = df['differences'].std()
tscore = mean/(std/math.sqrt(N))
print "Differences Mean:\t%.3f" % mean
print "Differences Std:\t%.3f" % std
print "t-score:\t\t%.3f" %tscore
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Provide one or two visualizations that show the distribution of the sample data. Write one or two sentences noting what you observe about the plot or plots.
Step2: R
|
6,278
|
<ASSISTANT_TASK:>
Python Code:
import medusa
from medusa.test import create_test_ensemble
ensemble = create_test_ensemble("Staphylococcus aureus")
import pandas as pd
biolog_base = pd.read_csv("../medusa/test/data/biolog_base_composition.csv", sep=",")
biolog_base
# convert the biolog base to a dictionary, which we can use to set ensemble.base_model.medium directly.
biolog_base = {'EX_'+component:1000 for component in biolog_base['ID']}
# Double check that the objective is set to the biomass reaction.
# For this model, 'bio1' is the id of the biomass reaction.
ensemble.base_model.objective = 'bio1'
from medusa.flux_analysis import flux_balance
carbon_sources = ["EX_cpd00027_e","EX_cpd00179_e"]
fluxes = {}
for carbon_source in carbon_sources:
biolog_base[carbon_source] = 10
ensemble.base_model.medium = biolog_base
fluxes[carbon_source] = flux_balance.optimize_ensemble(ensemble,return_flux='bio1', num_processes = 4)
biolog_base[carbon_source] = 0
import matplotlib.pylab as plt
import numpy as np
fig, ax = plt.subplots()
bins=np.histogram(np.hstack((fluxes[carbon_sources[0]]['bio1'],fluxes[carbon_sources[1]]['bio1'])), bins=20)[1]
plt.hist(fluxes[carbon_sources[0]]['bio1'],
bins = bins,
label=carbon_sources[0],
color = "red",
alpha = 0.5)
plt.hist(fluxes[carbon_sources[1]]['bio1'],
bins = bins,
label=carbon_sources[1],
color = "blue",
alpha = 0.5)
plt.axvline(x=fluxes[carbon_sources[0]]['bio1'].mean(), c = 'red')
plt.axvline(x=fluxes[carbon_sources[1]]['bio1'].mean(), c = 'blue')
ax.set_ylabel('# ensemble members')
ax.set_xlabel('Flux through biomass reaction')
ax.legend()
plt.show()
from scipy.stats import wilcoxon
cond1 = fluxes[carbon_sources[0]].copy()
cond2 = fluxes[carbon_sources[1]].copy()
cond1.columns = [carbon_sources[0]]
cond2.columns = [carbon_sources[1]]
both_conditions = pd.concat([cond1, cond2], axis=1).reindex(cond1.index) # join_axes was removed in newer pandas
wilcoxon(x=both_conditions[carbon_sources[0]],y=both_conditions[carbon_sources[1]])
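# Hedged follow-up: effect size as the median paired difference
# (glucose minus maltose) across ensemble members.
paired_diff = both_conditions[carbon_sources[0]] - both_conditions[carbon_sources[1]]
print(paired_diff.median())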
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's simulate growth on two different carbon sources, D-glucose (metabolite id
Step2: Now let's visualize the distributions of predicted flux through biomass using matplotlib. We'll generate a histogram for each condition, and plot the mean using a vertical line
Step3: Visually, we can see the mean for D-glucose (cpd00027) is slightly lower than for maltose (cpd00179). To evaluate this statistically, we'll use the Wilcoxon signed-rank test (implemented in SciPy), which tests the null hypothesis that the difference between paired samples (e.g. growth in D-glucose minus growth in maltose for each ensemble member) is symmetrically distributed around zero. Here, we choose a statistical test meant for paired data because each simulation result in one media condition has a related simulation result in the other condition which was generated using the same ensemble member. The Wilcoxon signed-rank test is suitable for paired univariate comparisons regardless of the distribution of data (e.g. when data are non-normally distributed, replace a paired t-test with the Wilcoxon signed-rank test).
|
6,279
|
<ASSISTANT_TASK:>
Python Code:
#%qtconsole # For inspecting variables.
# Standard
import os
from glob import glob # Unix style pathname pattern expansion.
import csv
import pickle
import time
# Scientific Computing and Visualization
import numpy as np; np.random.seed(13) # Lucky seed.
import matplotlib.pyplot as plt
%matplotlib inline
import cv2 # Image processing.
# Machine Learning
#from sklearn.utils import shuffle # Used numpy's shuffle instead.
#from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.contrib.layers import flatten
# Project-Specific.
# Includes custom functions for
# image processing, data manipulation, and dataset visualization.
from image_recognition_tools import *
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
def convert_mnist_dataset_to_standard_form(data):
'''Map pixel values into [0, 255].
'''
example_cnt = data.images.shape[0]
images = 255.0*data.images
np.round(images, out=images)
np.clip(images, 0.0, 255.0, out=images)
X = np.reshape(images, newshape=(example_cnt, 28, 28, 1)).astype(np.uint8)
y = np.argmax(data.labels, axis=1).astype(np.uint8)
return X, y
X_train, y_train = convert_mnist_dataset_to_standard_form(mnist.train)
X_train, y_train = shuffle(X_train, y_train)
X_validate, y_validate = convert_mnist_dataset_to_standard_form(mnist.validation)
X_validate, y_validate = shuffle(X_validate, y_validate)
X_test, y_test = convert_mnist_dataset_to_standard_form(mnist.test)
X_test, y_test = shuffle(X_test, y_test)
# Number of training examples.
training_example_cnt = X_train.shape[0]
# Number of validation examples.
validation_example_cnt = X_validate.shape[0]
# Number of testing examples.
test_example_cnt = X_test.shape[0]
# List of unique class labels.
class_list = list(set(y_train) | set(y_validate) | set(y_test))
class_list.sort()
# Number of classes in the dataset.
class_cnt = len(class_list) # For MNIST this should be 10 (labels 0,...,9).
# Single image shape.
image_shape = X_train[0, :].shape
print('Number of training examples =', training_example_cnt)
print('Number of validation examples =', validation_example_cnt)
print('Number of testing examples =', test_example_cnt)
print('Number of classes =', class_cnt)
print('Image shape =', image_shape)
X_all, y_all = combine(
X_train, y_train, X_validate, y_validate, X_test, y_test)
images_by_label = sort_by_class(X_all, y_all, class_list)
plot_representative_images(
images_by_label, class_cnt=class_cnt, english_labels=None, method='mean')
#plt.savefig('./fig/class_representative_images.png')
class_histograms(y_train, y_validate, y_test, class_cnt=class_cnt)
#plt.savefig('./fig/class_histograms.png')
image = X_train[100, :]
test_randomly_perturb(
image,
brightness_radius=0.3,
rotation_radius=30.0,
translation_radius=3,
shear_radius=3)
#plt.savefig('./fig/image_perturbations_example.png')
DATAPOINTS_PER_CLASS_AFTER_AUGMENTATION = 7000 # Default 7000.
X_train_augmented, y_train_augmented = balance(
X_train, y_train, class_list,
datapoint_cnt_per_class=DATAPOINTS_PER_CLASS_AFTER_AUGMENTATION,
perturb=True)
print('Number of augmented training examples =', len(y_train_augmented))
test_histogram_equalize_brightness(X_train[100, :])
def preprocess(X):
'''Prepare numpy array of images for input into CNN.
Args:
X: numpy array of images, e.g., as X_train,
X_validate, or X_test.
Returns:
X_in: numpy array of normalized gray images.
'''
X_in = np.zeros(
(X.shape[0], X.shape[1], X.shape[2], 1), dtype=np.float32)
for i, image in enumerate(X):
image = histogram_equalize_brightness(image)
#X_in[i, :] = normalize(image)[:, :, None] # Adds degenerate dimension.
X_in[i, :] = normalize(image)
return X_in
TRAIN_WITH_AUGMENTED_DATA = False
if TRAIN_WITH_AUGMENTED_DATA:
X_train_augmented_in = preprocess(X_train_augmented)
X_train_in = X_train_augmented_in
y_train = y_train_augmented
else:
X_train_in = preprocess(X_train)
X_validate_in = preprocess(X_validate)
X_test_in = preprocess(X_test)
def kjo_net(X_in, keep_probability):
'''CNN Architecture.
Args:
X_in: tf.placeholder(tf.float32, (None, 28, 28, 1)), preprocessed images.
keep_probability: tf.placeholder(tf.float32), probability of keeping nodes
during dropout.
Returns:
logits
'''
# Hyperparameters used with tf.truncated_normal to set random
# initial values of the weights and biases in each layer.
mu = 0.0 # Default 0.0
sigma = 0.1 # Default 0.1
#
# If using ReLU activations, initialize with slight positive bias, e.g., 0.1,
# to avoid "dead neurons".
initial_bias = 0.05
# Layer 1: Convolutional, 28x28x1 --> 24x24x16.
# In nominal LeNet was 32x32x1 --> 28x28x6.
# shape := (
# filter_height,
# filter_width,
# input_depth aka input_channel_cnt,
# output_depth aka output_channel_cnt).
conv1_W = tf.Variable(
tf.truncated_normal(shape=(5, 5, 1, 16), mean=mu, stddev=sigma),
name='conv1_W')
conv1_b = tf.Variable(tf.zeros(16) + initial_bias, name='conv1_b')
conv1 = tf.nn.conv2d(
X_in, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
conv1 = tf.nn.relu(conv1) # Activation.
#
# Pooling. 24x24x16 --> 12x12x16.
# In nominal LeNet was 28x28x6 --> 14x14x6.
conv1 = tf.nn.max_pool(
conv1, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='VALID')
# Layer 2: Convolutional, 12x12x16 --> 8x8x32.
# In nominal LeNet was 14x14x6 --> 10x10x16.
conv2_W = tf.Variable(
tf.truncated_normal(shape=(5, 5, 16, 32), mean=mu, stddev=sigma),
name='conv2_W')
conv2_b = tf.Variable(tf.zeros(32) + initial_bias, name='conv2_b')
conv2 = tf.nn.conv2d(
conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
conv2 = tf.nn.relu(conv2) # Activation.
#
# Pooling, 8x8x32 --> 4x4x32.
# In nominal LeNet was 10x10x16 --> 5x5x16.
conv2 = tf.nn.max_pool(
conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# Flatten layer, 4x4x32 --> 512.
# In nominal LeNet was 5x5x16 --> 400.
# Flattens tensor into 2 dimensions (batches, length).
fc0 = flatten(conv2)
# Layer 3: Fully Connected, 512 --> 400.
# In nominal LeNet was 400 --> 120.
fc1_W = tf.Variable(
tf.truncated_normal(shape=(512, 400), mean=mu, stddev=sigma),
name='fc1_W')
fc1_b = tf.Variable(tf.zeros(400) + initial_bias, name='fc1_b')
fc1 = tf.matmul(fc0, fc1_W) + fc1_b
fc1 = tf.nn.relu(fc1) # Activation.
#
# Dropout.
fc1 = tf.nn.dropout(fc1, keep_probability)
# Layer 4: Fully Connected, 400 --> 300.
# In nominal LeNet was 120 --> 84.
fc2_W = tf.Variable(
tf.truncated_normal(shape=(400, 300), mean=mu, stddev=sigma),
name='fc2_W')
fc2_b = tf.Variable(tf.zeros(300) + initial_bias, name='fc2_b')
fc2 = tf.matmul(fc1, fc2_W) + fc2_b
fc2 = tf.nn.relu(fc2) # Activation.
# Dropout.
fc2 = tf.nn.dropout(fc2, keep_probability)
# Layer 5: Fully Connected, 300 --> class_cnt (default 10).
# In nominal LeNet was 84 --> 10.
fc3_W = tf.Variable(
tf.truncated_normal(shape=(300, class_cnt), mean=mu, stddev=sigma),
name='fc3_W')
fc3_b = tf.Variable(tf.zeros(class_cnt) + initial_bias, name='fc3_b')
logits = tf.matmul(fc2, fc3_W) + fc3_b
return logits
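# Hedged sanity check (not part of the original training flow): build the
# network once in a throwaway graph and confirm the output shape is
# (batch, class_cnt).
with tf.Graph().as_default():
    X_check = tf.placeholder(tf.float32, (None, 28, 28, 1))
    logits_check = kjo_net(X_check, tf.constant(1.0))
    print(logits_check.get_shape())  # expect (?, class_cnt)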
# Hyperparameters.
LEARNING_RATE = 0.001 # Default 0.001
EPOCH_CNT = 30 # Number of times to run over training data. Default 30.
BATCH_SIZE = 256 # Datapoints to consider per backprop pass. Default 256.
KEEP_PROBABILITY = 0.6 # For dropout regularization, e.g., 0.3, 0.4, 0.5, 0.6, 0.7, 1.0
# Input images and output labels.
X_in = tf.placeholder(tf.float32, (None, 28, 28, 1)) # None => arbitrary batch size.
y = tf.placeholder(tf.int32, (None,)) # None => arbitrary batch size.
keep_probability = tf.placeholder(tf.float32)
y_one_hot = tf.one_hot(y, class_cnt)
logits = kjo_net(X_in, keep_probability)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=y_one_hot, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_one_hot, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# To save parameters for re-use.
saver = tf.train.Saver() # Must be initialized after tf symbolic variables.
validation_save_filename = './kjo_net.validation.ckpt'
def compute_accuracy(session, X_in_, y_):
'''Compute model accuracy.
::WARNING:: The trailing underscores on the argument variable names
are to prevent collision with the global tensorflow variables X_in and y.
Args:
session: tf.session.
X_in_: np.array, preprocessed inputs.
y_: np.array, outputs corresponding to X_in.
Returns:
float, accuracy of model applied to (X_in, y).
'''
#session = tf.get_default_session()
example_cnt = len(X_in_)
total_accuracy = 0
for offset in range(0, example_cnt, BATCH_SIZE):
batch_X_in, batch_y = \
X_in_[offset:offset+BATCH_SIZE], \
y_[offset:offset+BATCH_SIZE]
accuracy = session.run(
accuracy_operation,
feed_dict={X_in: batch_X_in, y: batch_y, keep_probability: 1.0})
total_accuracy += (accuracy * len(batch_X_in))
return total_accuracy / example_cnt
start_time = time.time()
accuracies_train = []
accuracies_validate = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
example_cnt = len(X_train_in)
print('Training...\n')
for i in range(EPOCH_CNT):
X_train_in, y_train = shuffle(X_train_in, y_train)
for offset in range(0, example_cnt, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_X_in, batch_y = X_train_in[offset:end], y_train[offset:end]
session.run(
training_operation,
feed_dict={X_in: batch_X_in, y: batch_y, keep_probability: KEEP_PROBABILITY})
accuracy_train = compute_accuracy(session, X_train_in, y_train)
accuracies_train.append(accuracy_train)
accuracy_validate = compute_accuracy(session, X_validate_in, y_validate)
accuracies_validate.append(accuracy_validate)
print('EPOCH {} ...'.format(i+1))
#print("Loss = {:.3f}".format(loss))
print('Training Accuracy = {:.3f}'.format(accuracy_train))
print('Validation Accuracy = {:.3f}'.format(accuracy_validate))
print()
saver.save(session, validation_save_filename)
print("Model saved")
dt_s = time.time() - start_time
dt_m = dt_s / 60.0
dt_h = dt_s / 3600.0
print('Wallclock time elapsed:' \
' {:.3f} s = {:.3f} m = {:.3f} h.'.format(dt_s, dt_m, dt_h))
# Plot accuracies vs epoch.
plt.plot(
np.arange(EPOCH_CNT), accuracies_train,
label='training accuracy', zorder=0, linewidth=3)
plt.plot(
np.arange(EPOCH_CNT), accuracies_validate,
label='validation accuracy', zorder=0, linewidth=3)
plt.title('Accuracies vs Epoch', fontsize=16, fontweight='bold')
plt.xlabel('Epoch', fontsize=14, fontweight='bold')
plt.ylabel('Accuracy', fontsize=14, fontweight='bold')
legend = plt.legend(numpoints=1)
plt.setp(legend.get_texts(), fontsize='14', fontweight='bold')
ax = plt.gca()
#plt.axis('equal') # Defective so use set_aspect instead?
#ax.set_aspect('equal', adjustable='box')
ax.margins(0.1)
#ax.set_yscale('log')
#ax.autoscale(tight=True)
#plt.xlim((0, 2000.0))
#plt.ylim((0, 1000.0))
plt.grid(True)
plt.show()
#plt.savefig('./fig/accuracies_vs_epoch.png')
with tf.Session() as session:
#saver = tf.train.import_meta_graph('kjo_net.meta')
#saver.restore(session, tf.train.latest_checkpoint('.'))
saver.restore(session, validation_save_filename)
test_accuracy = compute_accuracy(session, X_test_in, y_test)
print('Test Accuracy = {:.3f}'.format(test_accuracy))
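# Note: `combine` is defined earlier in this notebook. For reference, it is
# assumed to simply concatenate the two labeled datasets, roughly:
# def combine(X_a, y_a, X_b, y_b):
#     return np.concatenate([X_a, X_b]), np.concatenate([y_a, y_b])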
X_train_final_in, y_train_final = combine(
X_train_in, y_train, X_validate_in, y_validate)
print('Number of datapoints in final training set:', len(y_train_final))
# To save parameters for re-use.
testing_save_filename = './kjo_net.testing.ckpt'
start_time = time.time()
accuracies_train = []
accuracies_test = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
example_cnt = len(X_train_final_in)
print('Training...\n')
for i in range(EPOCH_CNT):
X_train_final_in, y_train_final = shuffle(X_train_final_in, y_train_final)
for offset in range(0, example_cnt, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_X_in, batch_y = X_train_final_in[offset:end], y_train_final[offset:end]
session.run(
training_operation,
feed_dict={X_in: batch_X_in, y: batch_y, keep_probability: KEEP_PROBABILITY})
accuracy_train = compute_accuracy(session, X_train_final_in, y_train_final)
accuracies_train.append(accuracy_train)
accuracy_test = compute_accuracy(session, X_test_in, y_test)
accuracies_test.append(accuracy_test)
print('EPOCH {} ...'.format(i+1))
#print("Loss = {:.3f}".format(loss))
print('Training Accuracy = {:.3f}'.format(accuracy_train))
print('Test Accuracy = {:.3f}'.format(accuracy_test))
print()
saver.save(session, testing_save_filename)
print("Model saved")
dt_s = time.time() - start_time
dt_m = dt_s / 60.0
dt_h = dt_s / 3600.0
print('Wallclock time elapsed:' \
' {:.3f} s = {:.3f} m = {:.3f} h.'.format(dt_s, dt_m, dt_h))
# Plot accuracies vs epoch.
plt.plot(
np.arange(EPOCH_CNT), accuracies_train,
label='training accuracy', zorder=0, linewidth=3)
plt.plot(
np.arange(EPOCH_CNT), accuracies_test,
label='test accuracy', zorder=0, linewidth=3)
plt.title('Accuracies vs Epoch', fontsize=16, fontweight='bold')
plt.xlabel('Epoch', fontsize=14, fontweight='bold')
plt.ylabel('Accuracy', fontsize=14, fontweight='bold')
legend = plt.legend(numpoints=1)
plt.setp(legend.get_texts(), fontsize='14', fontweight='bold')
ax = plt.gca()
#plt.axis('equal') # Defective so use set_aspect instead?
#ax.set_aspect('equal', adjustable='box')
ax.margins(0.1)
#ax.set_yscale('log')
#ax.autoscale(tight=True)
#plt.xlim((0, 2000.0))
#plt.ylim((0, 1000.0))
plt.grid(True)
plt.show()
#plt.savefig('./fig/accuracies_vs_epoch-final.png')
# Load new images.
new_datapoint_cnt = 5
ix_offset = 13
X_new = X_test[ix_offset:new_datapoint_cnt + ix_offset, :]
y_new = y_test[ix_offset:new_datapoint_cnt + ix_offset]
# Plot new images.
plot_images(X_new, y_new, english_labels=None)
#plt.savefig('./fig/new_images.png')
X_new_in = preprocess(X_new)
with tf.Session() as session:
#session.run(tf.global_variables_initializer())
#saver = tf.train.import_meta_graph('kjo_net.meta')
#saver.restore(session, tf.train.latest_checkpoint('.'))
saver.restore(session, testing_save_filename)
top_k = session.run(
tf.nn.top_k(tf.nn.softmax(logits), k=5),
feed_dict={X_in: X_new_in, keep_probability: 1.0})
test_accuracy = compute_accuracy(session, X_new_in, y_new)
# Plot softmax probabilities.
ixs = np.arange(X_new.shape[0])
probabilities, classes = top_k
probability_barh_width = 14.5
probability_barh_height = 3
for i, image in enumerate(X_new):
print('\nImage %g\n Truth class %g\n Predicted class %g' % \
(i, y_new[i], top_k[1][i, 0]))
fig, axes = plt.subplots(
1, 2, figsize=(probability_barh_width, probability_barh_height))
plt.subplot(1, 2, 1)
plt.imshow(image.squeeze(), cmap='gray')
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.subplot(1, 2, 2)
ax = plt.gca()
plt.barh(ixs, probabilities[i], height=0.5, align='center')
y_tick_labels = []
for j in ixs:
label = str(classes[i][j])
y_tick_labels.append(truncate_string(label, ubnd=25))
plt.yticks( ixs, y_tick_labels)
plt.title('Top 5 Class Probabilities')
#plt.xlabel('Probability')
#ax = plt.gca(); ax.set_xscale('log')
ax.invert_yaxis()
for j, value in enumerate(probabilities[i]): # use j: reusing i would shadow the outer loop variable
ax.text(value + 0.03, j + .03, str(value), color='blue', fontweight='bold')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xlim(0.0, 1.0)
plt.show();
#plt.savefig('./fig/new_image-%03g.top_softmax_probabilities.png' % i)
print('Test Accuracy on new images = {:.3f}'.format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 0 Load Data
Step2: 1 Dataset Summary, Exploration, and Balancing
Step3: Exploratory Visualization
Step4: And now we compute and display the means, one for each label.
Step5: Next, let's see how balanced the data are by making histograms of the labels in each of the different subsets of the data.
Step6: The classes have roughly equal representation both within each subset (train, validate, test) and across subsets. This is good because otherwise we would have to worry about unbalanced data leading to a classifier which is biased away from under-represented classes. We may still benefit from augmenting the datasets by synthesizing new images.
Step7: 2 CNN Architecture and Testing
Step8: We use _in to denote preprocessed versions of data because it is what is fed into the neural network proper.
Step9: TensorFlow Graph Definition
Step10: Training and Validation
Step11: Final Training and Testing
Step12: Since the gap between training and test accuracy is relatively small, and the accuracies increased roughly monotonically during training, I believe the model is well-fitting (i.e. not over- or underfitting).
Step13: Predictions and Top 5 Softmax Probabilities for Each Image
|
6,280
|
<ASSISTANT_TASK:>
Python Code:
# imports
import numpy as np
import matplotlib.pyplot as plt
from landlab import RasterModelGrid, imshow_grid
from landlab.components import TidalFlowCalculator
# set up the grid
grid = RasterModelGrid(
(3, 101), xy_spacing=2.0
) # only 1 row of core nodes, between 2 boundary rows
grid.set_closed_boundaries_at_grid_edges(
False, True, True, True
) # only east/right side open
z = grid.add_zeros("topographic__elevation", at="node") # create the bathymetry field
z[:] = -50.0 # mean water depth is 50 m below MSL, which is our vertical datum
# create the component
tfc = TidalFlowCalculator(grid, tidal_range=2.0, tidal_period=4.0e4, roughness=0.01)
# run the component
tfc.run_one_step()
# calculate the analytical solution
x = np.arange(3.0, 200.0, 2.0)
vel_analytical = 2.0e-6 * x
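# Where the 2.0e-6 factor comes from (derivation sketch): the water surface
# falls by the full tidal range (2 m) over half the tidal period (2e4 s),
# i.e. at a rate of 1e-4 m/s. By mass conservation, the ebb discharge per
# unit width at distance x from the closed wall is (1e-4 m/s) * x, and
# dividing by the 50 m mean depth gives a velocity of 2.0e-6 * x m/s.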
# plot both
plt.plot(x, grid.at_link["ebb_tide_flow__velocity"][grid.active_links], "b.")
plt.plot(x, vel_analytical, "r")
plt.xlabel("Distance from sea wall (x)")
plt.ylabel("Ebb tide velocity (m/s)")
plt.legend(["numerical", "analytical"])
from landlab.grid.mappers import map_link_vector_components_to_node
def map_velocity_components_to_nodes(grid):
Map the velocity components from the links to the nodes, and return the node arrays.
ebb_vel_x, ebb_vel_y = map_link_vector_components_to_node(
grid, grid.at_link["ebb_tide_flow__velocity"]
)
flood_vel_x = -ebb_vel_x
flood_vel_y = -ebb_vel_y
return (ebb_vel_x, ebb_vel_y, flood_vel_x, flood_vel_y)
def plot_tidal_flow(grid, resample=1):
(ebb_x, ebb_y, flood_x, flood_y) = map_velocity_components_to_nodes(grid)
# depth
plt.figure()
imshow_grid(
grid, grid.at_node["mean_water__depth"], cmap="YlGnBu", color_for_closed="g"
)
plt.title("Water depth (m)")
plt.xlabel("Distance (m)")
plt.ylabel("Distance (m)")
# down-sample for legible quiver plots if needed
if resample != 1:
xr = grid.x_of_node.reshape(
(grid.number_of_node_rows, grid.number_of_node_columns)
)[::resample, ::resample]
yr = grid.y_of_node.reshape(
(grid.number_of_node_rows, grid.number_of_node_columns)
)[::resample, ::resample]
ebb_xr = ebb_x.reshape((grid.number_of_node_rows, grid.number_of_node_columns))[
::resample, ::resample
]
ebb_yr = ebb_y.reshape((grid.number_of_node_rows, grid.number_of_node_columns))[
::resample, ::resample
]
fld_xr = flood_x.reshape(
(grid.number_of_node_rows, grid.number_of_node_columns)
)[::resample, ::resample]
fld_yr = flood_y.reshape(
(grid.number_of_node_rows, grid.number_of_node_columns)
)[::resample, ::resample]
else:
xr = grid.x_of_node
yr = grid.y_of_node
ebb_xr = ebb_x
ebb_yr = ebb_y
fld_xr = flood_x
fld_yr = flood_y
# ebb tide
plt.figure()
imshow_grid(grid, grid.at_node["topographic__elevation"])
plt.quiver(xr, yr, ebb_xr, ebb_yr)
plt.title("Ebb Tide")
plt.xlabel("Distance (m)")
plt.ylabel("Distance (m)")
ebb_vel_magnitude = np.sqrt(ebb_x * ebb_x + ebb_y * ebb_y)
plt.figure()
imshow_grid(grid, ebb_vel_magnitude, cmap="magma", color_for_closed="g")
plt.title("Ebb Tide Velocity Magnitude (m/s)")
plt.xlabel("Distance (m)")
plt.ylabel("Distance (m)")
# flood tide
plt.figure()
imshow_grid(grid, grid.at_node["topographic__elevation"])
plt.quiver(xr, yr, fld_xr, fld_yr)
plt.title("Flood Tide")
plt.xlabel("Distance (m)")
plt.ylabel("Distance (m)")
plt.figure()
flood_vel_magnitude = np.sqrt(flood_x * flood_x + flood_y * flood_y)
imshow_grid(grid, flood_vel_magnitude, cmap="magma", color_for_closed="g")
plt.title("Flood Tide Velocity Magnitude (m/s)")
plt.xlabel("Distance (m)")
plt.ylabel("Distance (m)")
# parameters
nrows = 15
ncols = 25
grid_spacing = 100.0 # m
mean_depth = 2.0 # m
tidal_range = 2.0 # m
roughness = 0.01 # s/m^1/3, i.e., Manning's n
# create and set up the grid
grid = RasterModelGrid((nrows, ncols), xy_spacing=grid_spacing)
z = grid.add_zeros("topographic__elevation", at="node")
z[:] = -mean_depth
grid.set_closed_boundaries_at_grid_edges(False, False, True, True)
# instantiate the TidalFlowCalculator
tfc = TidalFlowCalculator(grid, tidal_range=tidal_range, roughness=roughness)
# run it
tfc.run_one_step()
# make plots...
plot_tidal_flow(grid)
# parameters
nrows = 400
ncols = 200
grid_spacing = 2.0 # m
mean_depth = 2.0 # m
tidal_range = 3.1 # m
tidal_period = 12.5 * 3600.0 # s
roughness = 0.01 # s/m^1/3, i.e., Manning's n
# create and set up the grid
grid = RasterModelGrid((nrows, ncols), xy_spacing=grid_spacing)
z = grid.add_zeros("topographic__elevation", at="node")
z[:] = -mean_depth
grid.set_closed_boundaries_at_grid_edges(True, True, True, False)
# instantiate the TidalFlowCalculator
tfc = TidalFlowCalculator(
grid, tidal_range=tidal_range, tidal_period=tidal_period, roughness=roughness
)
# run it
tfc.run_one_step()
# make plots...
plot_tidal_flow(grid, resample=5)
# parameters
nrows = 400
ncols = 200
grid_spacing = 2.0 # m
mean_depth = 2.0 # m
tidal_range = 3.1 # m
tidal_period = 12.5 * 3600.0 # s
roughness = 0.01 # s/m^1/3, i.e., Manning's n
open_nodes = np.arange(95, 105, dtype=int)
# create and set up the grid
grid = RasterModelGrid((nrows, ncols), xy_spacing=grid_spacing)
z = grid.add_zeros("topographic__elevation", at="node")
z[:] = -mean_depth
grid.set_closed_boundaries_at_grid_edges(True, True, True, True)
grid.status_at_node[open_nodes] = grid.BC_NODE_IS_FIXED_VALUE
# instantiate the TidalFlowCalculator
tfc = TidalFlowCalculator(
grid, tidal_range=tidal_range, tidal_period=tidal_period, roughness=roughness
)
# run it
tfc.run_one_step()
# make plots...
plot_tidal_flow(grid, resample=5)
from landlab.grid.mappers import map_max_of_link_nodes_to_link
# parameters
nrows = 400
ncols = 200
grid_spacing = 2.0 # m
marsh_height = 1.0 # m
channel_depth = 2.0 # m
tidal_range = 3.1 # m
tidal_period = 12.5 * 3600.0 # s
open_nodes = np.arange(
94, 105, dtype=int
) # IDs of open-boundary nodes (along channel at bottom/south boundary)
roughness_shallow = 0.2 # Manning's n for areas above mean sea level (i.e., the marsh)
roughness_deep = 0.01 # Manning's n for areas below mean sea level (i.e., the channel)
# create and set up the grid
grid = RasterModelGrid((nrows, ncols), xy_spacing=grid_spacing)
z = grid.add_zeros("topographic__elevation", at="node")
z[grid.core_nodes] = marsh_height
channel = np.logical_and(grid.x_of_node >= 188.0, grid.x_of_node <= 208.0)
z[channel] = -channel_depth
grid.set_closed_boundaries_at_grid_edges(True, True, True, True)
grid.status_at_node[open_nodes] = grid.BC_NODE_IS_FIXED_VALUE
# set up roughness field (calculate on nodes, then map to links)
roughness_at_nodes = roughness_shallow + np.zeros(z.size)
roughness_at_nodes[z < 0.0] = roughness_deep
roughness = grid.add_zeros("roughness", at="link")
map_max_of_link_nodes_to_link(grid, roughness_at_nodes, out=roughness)
# instantiate the TidalFlowCalculator
tfc = TidalFlowCalculator(
grid, tidal_range=tidal_range, tidal_period=tidal_period, roughness="roughness"
)
# run it
tfc.run_one_step()
# make plots...
plot_tidal_flow(grid, resample=10)
from landlab.io import read_esri_ascii
# Set parameters (these are from the MarshMorpho2D source code)
tidal_period = 12.5 * 3600.0 # tidal period in seconds
tidal_range = 3.1 # tidal range in meters
roughness = 0.02 # Manning's n
mean_sea_level = 0.0 # mean sea level in meters
min_water_depth = (
0.01 # minimum depth for water on areas higher than low tide water surface, meters
)
nodata_code = 999 # code for a DEM cell with no valid data
# Read the DEM to create a grid and topography field
(grid, z) = read_esri_ascii("zSW3.asc", name="topographic__elevation")
# Configure boundaries: any nodata nodes, plus any nodes higher than mean high tide
grid.status_at_node[z == nodata_code] = grid.BC_NODE_IS_CLOSED
grid.status_at_node[z > 1.8] = grid.BC_NODE_IS_CLOSED
boundaries_above_msl = np.logical_and(
grid.status_at_node == grid.BC_NODE_IS_FIXED_VALUE, z > 0.0
)
grid.status_at_node[boundaries_above_msl] = grid.BC_NODE_IS_CLOSED
# Instantiate a TidalFlowCalculator component
tfc = TidalFlowCalculator(
grid,
tidal_period=tidal_period,
tidal_range=tidal_range,
roughness=roughness,
mean_sea_level=mean_sea_level,
min_water_depth=min_water_depth,
)
# Calculate tidal flow
tfc.run_one_step()
# make plots...
plot_tidal_flow(grid, resample=5)
from landlab.grid.mappers import map_link_vector_components_to_node
def plot_tidal_flow_hex(grid):
(ebb_x, ebb_y) = map_link_vector_components_to_node(
grid, grid.at_link["ebb_tide_flow__velocity"]
)
# ebb tide velocity vectors & magnitude
ebb_vel_magnitude = np.sqrt(ebb_x * ebb_x + ebb_y * ebb_y)
plt.figure()
imshow_grid(grid, ebb_vel_magnitude, cmap="magma")
plt.quiver(grid.x_of_node, grid.y_of_node, ebb_x, ebb_y)
plt.title("Ebb Tide")
plt.xlabel("Distance (m)")
plt.ylabel("Distance (m)")
from landlab import HexModelGrid
# parameters
nrows = 15
ncols = 25
grid_spacing = 100.0 # m
mean_depth = 2.0 # m
tidal_range = 2.0 # m
roughness = 0.01 # s/m^1/3, i.e., Manning's n
# create and set up the grid
grid = HexModelGrid((nrows, ncols), spacing=grid_spacing, node_layout="rect")
z = grid.add_zeros("topographic__elevation", at="node")
z[:] = -mean_depth
grid.status_at_node[grid.nodes_at_bottom_edge] = grid.BC_NODE_IS_CLOSED
grid.status_at_node[grid.nodes_at_left_edge] = grid.BC_NODE_IS_CLOSED
# instantiate the TidalFlowCalculator
tfc = TidalFlowCalculator(grid, tidal_range=tidal_range, roughness=roughness)
# run it
tfc.run_one_step()
# make plots...
plot_tidal_flow_hex(grid)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: As we would expect, the numerical solution is slightly lower than the analytical solution, because our simplified analytical solution does not take into account the extra water depth whose gradient propels the ebb tide. (Exercise to the reader
Step3: Uniform with one open boundary
Step4: Uniform with narrow open boundary
Step5: Straight channel
Step6: Case study based on example in Giulio Mariotti's MarshMorpho2D package
Step7: Example with hex grid
|
6,281
|
<ASSISTANT_TASK:>
Python Code:
import logging
import os.path
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from gensim import corpora, models, similarities
if (os.path.exists("/tmp/deerwester.dict")):
dictionary = corpora.Dictionary.load('/tmp/deerwester.dict')
corpus = corpora.MmCorpus('/tmp/deerwester.mm')
print("Used files generated from first tutorial")
else:
print("Please run first tutorial to generate data set")
print (dictionary[0])
print (dictionary[1])
print (dictionary[2])
tfidf = models.TfidfModel(corpus) # step 1 -- initialize a model
doc_bow = [(0, 1), (1, 1)]
print(tfidf[doc_bow]) # step 2 -- use the model to transform vectors
corpus_tfidf = tfidf[corpus]
for doc in corpus_tfidf:
print(doc)
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=2) # initialize an LSI transformation
corpus_lsi = lsi[corpus_tfidf] # create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi
lsi.print_topics(2)
for doc in corpus_lsi: # both bow->tfidf and tfidf->lsi transformations are actually executed here, on the fly
print(doc)
lsi.save('/tmp/model.lsi') # same for tfidf, lda, ...
lsi = models.LsiModel.load('/tmp/model.lsi')
model = models.TfidfModel(corpus, normalize=True)
model = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=300)
model = models.RpModel(corpus_tfidf, num_topics=500)
model = models.LdaModel(corpus, id2word=dictionary, num_topics=100)
model = models.HdpModel(corpus, id2word=dictionary)
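# A hedged aside (not in the original tutorial): a transformed corpus can also
# be serialized to disk for later streaming; the path below is illustrative.
corpora.MmCorpus.serialize('/tmp/corpus_lsi.mm', corpus_lsi)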
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: if you want to see logging events.
Step2: In this tutorial, I will show how to transform documents from one vector representation into another. This process serves two goals
Step3: We used our old corpus from tutorial 1 to initialize (train) the transformation model. Different transformations may require different initialization parameters; in case of TfIdf, the “training” consists simply of going through the supplied corpus once and computing document frequencies of all its features. Training other models, such as Latent Semantic Analysis or Latent Dirichlet Allocation, is much more involved and, consequently, takes much more time.
Step4: Or to apply a transformation to a whole corpus
Step5: In this particular case, we are transforming the same corpus that we used for training, but this is only incidental. Once the transformation model has been initialized, it can be used on any vectors (provided they come from the same vector space, of course), even if they were not used in the training corpus at all. This is achieved by a process called folding-in for LSA, by topic inference for LDA etc.
Step6: Here we transformed our Tf-Idf corpus via Latent Semantic Indexing into a latent 2-D space (2-D because we set num_topics=2). Now you’re probably wondering
Step7: (the topics are printed to log – see the note at the top of this page about activating logging)
Step8: The next question might be
Step9: Latent Semantic Indexing, LSI (or sometimes LSA) transforms documents from either bag-of-words or (preferably) TfIdf-weighted space into a latent space of a lower dimensionality. For the toy corpus above we used only 2 latent dimensions, but on real corpora, target dimensionality of 200–500 is recommended as a “golden standard” [1].
Step10: LSI training is unique in that we can continue “training” at any point, simply by providing more training documents. This is done by incremental updates to the underlying model, in a process called online training. Because of this feature, the input document stream may even be infinite – just keep feeding LSI new documents as they arrive, while using the computed transformation model as read-only in the meanwhile!
Step11: Latent Dirichlet Allocation, LDA is yet another transformation from bag-of-words counts into a topic space of lower dimensionality. LDA is a probabilistic extension of LSA (also called multinomial PCA), so LDA’s topics can be interpreted as probability distributions over words. These distributions are, just like with LSA, inferred automatically from a training corpus. Documents are in turn interpreted as a (soft) mixture of these topics (again, just like with LSA).
Step12: gensim uses a fast implementation of online LDA parameter estimation based on [2], modified to run in distributed mode on a cluster of computers.
|
6,282
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
column_names = ['user_id', 'item_id', 'rating', 'timestamp']
df = pd.read_csv('u.data', sep='\t', names=column_names)
df.head()
movie_titles = pd.read_csv("Movie_Id_Titles")
movie_titles.head()
df = pd.merge(df,movie_titles,on='item_id')
df.head()
n_users = df.user_id.nunique()
n_items = df.item_id.nunique()
print('Num. of Users: '+ str(n_users))
print('Num of Movies: '+str(n_items))
from sklearn.model_selection import train_test_split
train_data, test_data = train_test_split(df, test_size=0.25)
#Create two user-item matrices, one for training and another for testing
train_data_matrix = np.zeros((n_users, n_items))
for line in train_data.itertuples():
train_data_matrix[line[1]-1, line[2]-1] = line[3]
test_data_matrix = np.zeros((n_users, n_items))
for line in test_data.itertuples():
test_data_matrix[line[1]-1, line[2]-1] = line[3]
from sklearn.metrics.pairwise import pairwise_distances
user_similarity = pairwise_distances(train_data_matrix, metric='cosine')
item_similarity = pairwise_distances(train_data_matrix.T, metric='cosine')
def predict(ratings, similarity, type='user'):
if type == 'user':
mean_user_rating = ratings.mean(axis=1)
#You use np.newaxis so that mean_user_rating has same format as ratings
ratings_diff = (ratings - mean_user_rating[:, np.newaxis])
pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T
elif type == 'item':
pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
return pred
item_prediction = predict(train_data_matrix, item_similarity, type='item')
user_prediction = predict(train_data_matrix, user_similarity, type='user')
from sklearn.metrics import mean_squared_error
from math import sqrt
def rmse(prediction, ground_truth):
prediction = prediction[ground_truth.nonzero()].flatten()
ground_truth = ground_truth[ground_truth.nonzero()].flatten()
return sqrt(mean_squared_error(prediction, ground_truth))
print('User-based CF RMSE: ' + str(rmse(user_prediction, test_data_matrix)))
print('Item-based CF RMSE: ' + str(rmse(item_prediction, test_data_matrix)))
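# Illustrative extension (not part of the original notebook): turn the
# user-based predictions into top-5 recommendations for one user, masking out
# items that the user already rated in the training set.
user_ix = 0
already_rated = train_data_matrix[user_ix].nonzero()[0]
scores = user_prediction[user_ix].copy()
scores[already_rated] = -np.inf
top5_item_ids = np.argsort(scores)[::-1][:5] + 1 # matrix column j <-> item_id j+1
print(movie_titles[movie_titles['item_id'].isin(top5_item_ids)])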
sparsity=round(1.0-len(df)/float(n_users*n_items),3)
print('The sparsity level of MovieLens100K is ' + str(sparsity*100) + '%')
import scipy.sparse as sp
from scipy.sparse.linalg import svds
#get SVD components from train matrix. Choose k.
u, s, vt = svds(train_data_matrix, k = 20)
s_diag_matrix=np.diag(s)
X_pred = np.dot(np.dot(u, s_diag_matrix), vt)
print('SVD-based CF RMSE: ' + str(rmse(X_pred, test_data_matrix)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can then read in the u.data file, which contains the full dataset. You can read a brief description of the dataset here.
Step2: Let's take a quick look at the data.
Step3: Note how we only have the item_id, not the movie name. We can use the Movie_Id_Titles csv file to grab the movie names and merge it with this dataframe
Step4: Then merge the dataframes
Step5: Now let's take a quick look at the number of unique users and movies.
Step6: Train Test Split
Step7: Memory-Based Collaborative Filtering
Step8: You can use the pairwise_distances function from sklearn to calculate the cosine similarity. Note, the output will range from 0 to 1 since the ratings are all positive.
Step9: Next step is to make predictions. You have already created similarity matrices
Step10: Evaluation
Step11: Memory-based algorithms are easy to implement and produce reasonable prediction quality.
Step12: To give an example of the learned latent preferences of the users and items
|
6,283
|
<ASSISTANT_TASK:>
Python Code:
print "Hello World!"
x=42
print x+10
print x/4
x="42"
print x+10 # raises a TypeError: cannot add a string and an integer
print x+"10"
x=[1, 2, 3]
y=[4,5, 6]
print x
print x*2
print x+y
print range(10)
print range(20, 50, 3)
print []
x=range(10)
print x
print "First value", x[0]
print "Last value", x[-1]
print "Fourth to sixth values", x[3:5]
x=[1,2,3,4,5]
x[2]=8
print x
print "Testing append"
x.append(6)
print x
x.append([7,8])
print x
print "testing extend"
x=[1,2,3,4,5]
#x.extend(6) # would raise a TypeError: an integer is not iterable
#print x
x.extend([7,8])
print x
print "testing insert"
x=[1,2,3,4,5]
x.insert(3, "in")
print x
x=range(1,11,1)
print x
x_2=[]
for i in x:
i_2=i*i
x_2.append(i_2)
print x_2
x=range(1,11,1)
print x
x_2=[i*i for i in x]
print x_2
x={}
x['answer']=42
print x['answer']
AbMag={'U':5.61, 'B':5.48, 'V':4.83, 'R':4.42, 'I':4.08}
print AbMag['U']
print AbMag.items()
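# A small illustrative use of the dictionary (not in the original tutorial):
# a magnitude difference converts to a flux ratio via 10**(-0.4*(M1 - M2)).
print "Sun's B-to-V flux ratio:", 10**(-0.4*(AbMag['B'] - AbMag['V']))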
def GeoSum(r):
powers=range(1,11,1) #set up a list for the exponents 1 to 10
terms=[(1./(r**x)) for x in powers] #calculate each term in the series
return sum(terms) #return the sum of the list
TermValue=2
print GeoSum(TermValue), (1.)/(TermValue-1)
class SampleClass:
def __init__(self, value): #run on initial setup of the class, provide a value
self.value = value
self.square = value**2
def powerraise(self, powerval): #only run when we call it, provide powerval
self.powerval=powerval
self.raisedpower=self.value**powerval
MyNum=SampleClass(3)
print MyNum.value
print MyNum.square
MyNum.powerraise(4)
print MyNum.powerval
print MyNum.raisedpower
print MyNum.value,'^',MyNum.powerval,'=',MyNum.raisedpower
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are, however, a few lines that you will usually see in a Python script. The first line often starts with #! and is called the shebang. For a Python script, an example of the shebang line would be "#!/usr/bin/env python"
Step2: Lists
Step3: We can also set up a quick list if we want to using the range function. If we use just a single number, then we'll get a list of integers from 0 to 1 less than the number we gave it.
Step4: If we want to, we can refer to subsets of the list. For just a single term, we can just use the number corresponding to that position. An important thing with Python is that the list index starts at 0, not at 1, starting from the first term. If we're more concerned about the last number in the list, then we can use negative numbers as the index. The last item in the list is -1, the item before that is -2, and so on.
Step5: Modifying lists
Step6: Loops and List Comprehension
Step7: While that loop works, even this pretty simple example can be condensed into something a bit shorter. We have to set up a blank list, and then after that, the loop itself was 3 lines, so just getting the squares of all these values took 4 lines. We can do it in one with list comprehension.
Step8: Dictionaries
Step9: These are particularly useful if you'll have a handful of values you'd like to call back to often. For an astronomy example, we can set up a dictionary that contains the absolute magnitude of the Sun in a bunch of bands (from Binney & Merrifield). We can now have a code that easily calls absolute magnitudes whenever needed using that dictionary.
Step10: Functions
Step11: Classes
|
6,284
|
<ASSISTANT_TASK:>
Python Code:
import skrf
import numpy as np
import matplotlib.pyplot as mplt
nw = skrf.network.Network('./190ghz_tx_measured.S2P')
vf = skrf.VectorFitting(nw)
vf.vector_fit(n_poles_real=4, n_poles_cmplx=4)
vf.plot_convergence()
vf.get_rms_error()
# plot frequency responses
fig, ax = mplt.subplots(2, 2)
fig.set_size_inches(12, 8)
vf.plot_s_mag(0, 0, ax=ax[0][0]) # s11
vf.plot_s_mag(0, 1, ax=ax[0][1]) # s12
vf.plot_s_mag(1, 0, ax=ax[1][0]) # s21
vf.plot_s_mag(1, 1, ax=ax[1][1]) # s22
fig.tight_layout()
mplt.show()
freqs = np.linspace(0, 500e9, 501) # plot model response from dc to 500 GHz
fig, ax = mplt.subplots(2, 2)
fig.set_size_inches(12, 8)
vf.plot_s_mag(0, 0, freqs=freqs, ax=ax[0][0]) # s11
vf.plot_s_mag(0, 1, freqs=freqs, ax=ax[0][1]) # s12
vf.plot_s_mag(1, 0, freqs=freqs, ax=ax[1][0]) # s21
vf.plot_s_mag(1, 1, freqs=freqs, ax=ax[1][1]) # s22
fig.tight_layout()
mplt.show()
vf.vector_fit(n_poles_real=3, n_poles_cmplx=4)
vf.plot_convergence()
vf.get_rms_error()
fig, ax = mplt.subplots(2, 2)
fig.set_size_inches(12, 8)
vf.plot_s_mag(0, 0, freqs=freqs, ax=ax[0][0]) # s11
vf.plot_s_mag(0, 1, freqs=freqs, ax=ax[0][1]) # s12
vf.plot_s_mag(1, 0, freqs=freqs, ax=ax[1][0]) # s21
vf.plot_s_mag(1, 1, freqs=freqs, ax=ax[1][1]) # s22
fig.tight_layout()
mplt.show()
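# Export the fitted model as a SPICE subcircuit, as mentioned in the text.
# (Hedged: `write_spice_subcircuit_s` is the method name in recent scikit-rf
# versions; check your installed version. The filename is illustrative.)
vf.write_spice_subcircuit_s('190ghz_tx_model.sp')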
vf.vector_fit(n_poles_real=0, n_poles_cmplx=5)
vf.plot_convergence()
vf.get_rms_error()
fig, ax = mplt.subplots(2, 2)
fig.set_size_inches(12, 8)
vf.plot_s_mag(0, 0, freqs=freqs, ax=ax[0][0]) # s11
vf.plot_s_mag(0, 1, freqs=freqs, ax=ax[0][1]) # s12
vf.plot_s_mag(1, 0, freqs=freqs, ax=ax[1][0]) # s21
vf.plot_s_mag(1, 1, freqs=freqs, ax=ax[1][1]) # s22
fig.tight_layout()
mplt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This example is a lot more tricky to fit, because the responses contain a few "bumps" and noise from the measurement. In such a case, finding a good number of initial poles can take a few iterations.
Step2: First attempt
Step3: The function plot_convergence() can be helpful to examine the convergence and see if something went wrong.
Step4: Checking the results by comparing the model responses to the original sampled data indicates a successful fit, which is also indicated by a small rms error (less than 0.05)
Step5: It is a good idea to also check the model response well outside the original frequency range.
Step6: Second attempt
Step7: This fit took more iterations, but it converged nevertheless and it matches the network data very well inside the fitting band. Again, a small rms error is achieved
Step8: This looks good, so let's export the model as a SPICE subcircuit. For example
Step9: Even though the pole relocation process oscillated between two (or more?) solutions and did not converge, the fit was still successful, because the solutions themselves did converge
|
6,285
|
<ASSISTANT_TASK:>
Python Code:
class Module(object):
def __init__ (self):
self.output = None
self.gradInput = None
self.training = True
Basically, you can think of a module as a black box
which can process `input` data and produce `output` data.
This is like applying a function which is called `forward`:
output = module.forward(input)
The module should be able to perform a backward pass: to differentiate the `forward` function.
Moreover, it should be able to differentiate it if it is part of a chain (chain rule).
The latter implies there is a gradient from the previous step of the chain rule.
gradInput = module.backward(input, gradOutput)
def forward(self, input):
Takes an input object, and computes the corresponding output of the module.
return self.updateOutput(input)
def backward(self,input, gradOutput):
Performs a backpropagation step through the module, with respect to the given input.
This includes
- computing a gradient w.r.t. `input` (is needed for further backprop),
- computing a gradient w.r.t. parameters (to update parameters while optimizing).
self.updateGradInput(input, gradOutput)
self.accGradParameters(input, gradOutput)
return self.gradInput
def updateOutput(self, input):
Computes the output using the current parameter set of the class and input.
This function returns the result which is stored in the `output` field.
Make sure to both store the data in `output` field and return it.
# The easiest case:
# self.output = input
# return self.output
pass
def updateGradInput(self, input, gradOutput):
Computing the gradient of the module with respect to its own input.
This is returned in `gradInput`. Also, the `gradInput` state variable is updated accordingly.
The shape of `gradInput` is always the same as the shape of `input`.
Make sure to both store the gradients in `gradInput` field and return it.
# The easiest case:
# self.gradInput = gradOutput
# return self.gradInput
pass
def accGradParameters(self, input, gradOutput):
Computing the gradient of the module with respect to its own parameters.
No need to override if module has no parameters (e.g. ReLU).
pass
def zeroGradParameters(self):
Zeroes `gradParams` variable if the module has params.
pass
def getParameters(self):
Returns a list with its parameters.
If the module does not have parameters return empty list.
return []
def getGradParameters(self):
Returns a list with gradients with respect to its parameters.
If the module does not have parameters return empty list.
return []
def train(self): # renamed from `training`: the boolean attribute set in __init__ would shadow a method of that name
Sets training mode for the module.
Training and testing behaviour differs for Dropout, BatchNorm.
self.training = True
def evaluate(self):
Sets evaluation mode for the module.
Training and testing behaviour differs for Dropout, BatchNorm.
self.training = False
def __repr__(self):
Pretty printing. Should be overridden in every module if you want
to have readable description.
return "Module"
class Sequential(Module):
This class implements a container, which processes `input` data sequentially.
`input` is processed by each module (layer) in self.modules consecutively.
The resulting array is called `output`.
def __init__ (self):
super(Sequential, self).__init__()
self.modules = []
def add(self, module):
Adds a module to the container.
self.modules.append(module)
self.inputs = []
def updateOutput(self, input):
Basic workflow of FORWARD PASS:
y_0 = module[0].forward(input)
y_1 = module[1].forward(y_0)
...
output = module[n-1].forward(y_{n-2})
Just write a little loop.
self.inputs = []
y = input
for mod in self.modules:
self.inputs.append(y)
y = mod.forward(y)
self.output = y
return self.output
def backward(self, input, gradOutput):
Workflow of BACKWARD PASS:
g_{n-1} = module[n-1].backward(y_{n-2}, gradOutput)
g_{n-2} = module[n-2].backward(y_{n-3}, g_{n-1})
...
g_1 = module[1].backward(y_0, g_2)
gradInput = module[0].backward(input, g_1)
!!!
To each module you need to provide the input it saw during the forward pass;
it is used while computing gradients.
Make sure that the input for the `i`-th layer is the output of `module[i-1]`
(i.e. the same input as in the forward pass), and NOT the `input` to this Sequential module.
!!!
g = gradOutput
for mod, inp in zip(self.modules[::-1], self.inputs[::-1]):
g = mod.backward(inp, g)
self.gradInput = g
return self.gradInput
def zeroGradParameters(self):
for module in self.modules:
module.zeroGradParameters()
def getParameters(self):
Should gather all parameters in a list.
return [x.getParameters() for x in self.modules]
def getGradParameters(self):
Should gather all gradients w.r.t parameters in a list.
return [x.getGradParameters() for x in self.modules]
def __repr__(self):
string = "".join([str(x) + '\n' for x in self.modules])
return string
def __getitem__(self,x):
return self.modules.__getitem__(x)
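# A minimal numerical gradient check (illustrative; not part of the original
# assignment). Compares the analytic gradInput of a deterministic module
# against central finite differences of sum(module.forward(input)).
def numeric_grad_check(module, input, eps=1e-6):
    output = module.forward(input)
    analytic = np.array(module.backward(input, np.ones_like(output)))
    numeric = np.zeros_like(input)
    it = np.nditer(input, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        orig = input[ix]
        input[ix] = orig + eps
        f_plus = np.sum(module.forward(input))
        input[ix] = orig - eps
        f_minus = np.sum(module.forward(input))
        input[ix] = orig  # restore the original entry
        numeric[ix] = (f_plus - f_minus) / (2 * eps)
        it.iternext()
    return np.max(np.abs(analytic - numeric))
# Example (for modules defined below): numeric_grad_check(ReLU(), np.random.randn(4, 3))
# should return something on the order of 1e-9.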
class Linear(Module):
A module which applies a linear transformation
A common name is fully-connected layer, InnerProductLayer in caffe.
The module should work with 2D input of shape (n_samples, n_feature).
def __init__(self, n_in, n_out):
super(Linear, self).__init__()
# This is a nice initialization
stdv = 1./np.sqrt(n_in)
self.W = np.random.uniform(-stdv, stdv, size = (n_out, n_in))
self.b = np.random.uniform(-stdv, stdv, size = n_out)
self.gradW = np.zeros_like(self.W)
self.gradb = np.zeros_like(self.b)
def updateOutput(self, input):
self.output = input.dot(self.W.T) + self.b
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = gradOutput.dot(self.W)
return self.gradInput
def accGradParameters(self, input, gradOutput):
self.gradW = gradOutput.T.dot(input)
self.gradb = gradOutput.sum(axis=0)
def zeroGradParameters(self):
self.gradW.fill(0)
self.gradb.fill(0)
def getParameters(self):
return [self.W, self.b]
def getGradParameters(self):
return [self.gradW, self.gradb]
def __repr__(self):
s = self.W.shape
q = 'Linear %d -> %d' %(s[1],s[0])
return q
class SoftMax(Module):
def __init__(self):
super(SoftMax, self).__init__()
def updateOutput(self, input):
# start with normalization for numerical stability
self.output = np.subtract(input, input.max(axis=1, keepdims=True))
self.output = np.exp(self.output)
self.output = (self.output.T / (np.sum(self.output, axis=1))).T
return self.output
def updateGradInput(self, input, gradOutput):
input = np.subtract(input, input.max(axis=1, keepdims=True))
output = (np.exp(input).T / (np.sum(np.exp(input), axis=1))).T
self.gradInput = np.zeros(input.shape)
self.gradInput += gradOutput * output
self.gradInput -= (np.sum(gradOutput * output, axis=1) * output.T).T
return self.gradInput
def __repr__(self):
return "SoftMax"
class BatchMeanSubtraction(Module):
def __init__(self, alpha = 0.):
super(BatchMeanSubtraction, self).__init__()
self.alpha = alpha
self.old_mean = None
def updateOutput(self, input):
if not self.training:
mean_to_subtract = self.old_mean
elif self.old_mean is not None:
mean_to_subtract = self.old_mean * self.alpha + np.mean(input, axis=0) * (1 - self.alpha)
else:
mean_to_subtract = np.mean(input, axis=0)
self.old_mean = mean_to_subtract
self.output = input - mean_to_subtract
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = gradOutput - np.mean(gradOutput, axis=0)
return self.gradInput
def __repr__(self):
return "BatchMeanNormalization"
class Dropout(Module):
def __init__(self, p=0.5):
super(Dropout, self).__init__()
self.p = p
self.mask = None
def updateOutput(self, input):
    if self.training:
        # p is the probability of keeping a unit.
        self.mask = np.random.binomial(1, self.p, size=input.shape)
        self.output = input * self.mask
    else:
        # At evaluation time, scale by p to match the expected activation.
        self.output = input * self.p
    return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = gradOutput * self.mask
return self.gradInput
def __repr__(self):
return "Dropout"
class ReLU(Module):
def __init__(self):
super(ReLU, self).__init__()
def updateOutput(self, input):
self.output = np.maximum(input, 0)
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = np.multiply(gradOutput , input > 0)
return self.gradInput
def __repr__(self):
return "ReLU"
class LeakyReLU(Module):
def __init__(self, slope = 0.03):
super(LeakyReLU, self).__init__()
self.slope = slope
def updateOutput(self, input):
self.output = np.maximum(input, self.slope * input)
return self.output
def updateGradInput(self, input, gradOutput):
    # Copy so we do not modify gradOutput in place.
    self.gradInput = np.copy(gradOutput)
    self.gradInput[input < 0] *= self.slope
    return self.gradInput
def __repr__(self):
return "LeakyReLU"
class ELU(Module):
def __init__(self, alpha = 1.0):
super(ELU, self).__init__()
self.alpha = alpha
def updateOutput(self, input):
    # Copy so we do not modify the caller's input array in place.
    self.output = np.copy(input)
    self.output[input < 0] = (np.exp(input[input < 0]) - 1) * self.alpha
    return self.output
def updateGradInput(self, input, gradOutput):
    # Copy so we do not modify gradOutput in place.
    self.gradInput = np.copy(gradOutput)
    self.gradInput[input < 0] *= self.alpha * np.exp(input[input < 0])
    return self.gradInput
def __repr__(self):
return "ELU"
class SoftPlus(Module):
def __init__(self):
super(SoftPlus, self).__init__()
def updateOutput(self, input):
self.output = np.log(np.exp(input) + 1)
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = 1. / (1 + np.exp(-input)) * gradOutput
return self.gradInput
def __repr__(self):
return "SoftPlus"
class Criterion(object):
def __init__ (self):
self.output = None
self.gradInput = None
def forward(self, input, target):
Given an input and a target, compute the loss function
associated to the criterion and return the result.
For consistency this function should not be overridden,
all the code goes in `updateOutput`.
return self.updateOutput(input, target)
def backward(self, input, target):
Given an input and a target, compute the gradients of the loss function
associated to the criterion and return the result.
For consistency this function should not be overridden,
all the code goes in `updateGradInput`.
return self.updateGradInput(input, target)
def updateOutput(self, input, target):
Function to override.
return self.output
def updateGradInput(self, input, target):
Function to override.
return self.gradInput
def __repr__(self):
Pretty printing. Should be overridden in every module if you want
to have readable description.
return "Criterion"
class MSECriterion(Criterion):
def __init__(self):
super(MSECriterion, self).__init__()
def updateOutput(self, input, target):
self.output = np.sum(np.power(input - target,2)) / input.shape[0]
return self.output
def updateGradInput(self, input, target):
self.gradInput = (input - target) * 2 / input.shape[0]
return self.gradInput
def __repr__(self):
return "MSECriterion"
class ClassNLLCriterion(Criterion):
def __init__(self):
super(ClassNLLCriterion, self).__init__()
def updateOutput(self, input, target):
# Use this trick to avoid numerical errors
eps = 1e-15
input_clamp = np.clip(input, eps, 1 - eps)
self.output = -np.sum(target * np.log(input_clamp)) / target.shape[0]
return self.output
def updateGradInput(self, input, target):
# Use this trick to avoid numerical errors
input_clamp = np.clip(input, 1e-15, 1 - 1e-15)
self.gradInput = -target / input_clamp / target.shape[0]
return self.gradInput
def __repr__(self):
return "ClassNLLCriterion"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step12: Module is an abstract class which defines fundamental methods necessary for training a neural network. You do not need to change anything here, just read the comments.
Step19: Sequential container
Step21: Layers
Step22: This one is probably the hardest, but like the others it takes only about 5 lines of code in total.
Step23: One of the most significant recent ideas that impacted NNs a lot is Batch normalization. The idea is simple, yet effective
Step24: Implement dropout. The idea and implementation are really simple
Step25: Activation functions
Step26: Implement Leaky Rectified Linear Unit. Experiment with the slope.
Step27: Implement Exponential Linear Units activations.
Step28: Implement SoftPlus activations. Note how much they look like ReLU.
Step34: Criterions
Step35: The MSECriterion, which is the basic L2 norm usually used for regression, is implemented here for you.
Step36: Your task is to implement the ClassNLLCriterion. It should implement the multiclass log loss. Although there is a sum over y (target) in that formula, the target is one-hot encoded, so only the term for the true class contributes for each sample.
|
6,286
|
<ASSISTANT_TASK:>
Python Code:
data_path = '../../SFPD_Incidents_-_from_1_January_2003.csv'
data = pd.read_csv(data_path)
mask = (data.Category == 'PROSTITUTION') & (data.Y != 90)
filterByCat = data[mask]
reducted = filterByCat[['PdDistrict','X','Y']].copy() # copy to avoid SettingWithCopyWarning when adding columns below
X = data.loc[mask][['X','Y']]
centers = {}
def knn(k):
    # Note: despite the name, this fits k-means clustering (not k-nearest neighbors).
    md = cluster.KMeans(n_clusters=k).fit(X)
    return md.predict(X), md.cluster_centers_
for i in range(2,7):
reducted['K'+str(i)], centers[i] = knn(i)
centers
reducted.head()
reducted.to_csv('week_8_vis_1.csv', sep=',')
centers
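# A hedged extension (not in the original notebook): inertia per k, the usual
# "elbow" diagnostic for choosing the number of clusters.
inertias = {k: cluster.KMeans(n_clusters=k).fit(X).inertia_ for k in range(2, 7)}
inertias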
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then we want to filter the data set.
Step2: To reduce the amount of data we need to load on the page, we only extract the columns that we need.
Step3: Then we define a function that we use to calculate the clusters, as well as the centroids.
Step4: Now we calculate the k-means clusters and centroids for each k from 2 to 6.
Step5: Here is a preview of our data, now enriched with K values.
Step6: Write our result
Step7: Below, the computed centroids are printed.
|
6,287
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Imports from Python packages.
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from scipy.stats import ttest_rel, ttest_ind
import pandas as pd
import numpy as np
import os
# Imports from FinanceOps.
from data_keys import *
from data import load_index_data, load_stock_data
from returns import prepare_ann_returns, reinvestment_growth
# Define the ticker-names for the stocks we consider.
ticker_SP500 = "S&P 500"
ticker_JNJ = "JNJ"
ticker_K = "K"
ticker_PG = "PG"
ticker_WMT = "WMT"
# Load the financial data for the stocks.
df_SP500 = load_index_data(ticker=ticker_SP500)
df_JNJ = load_stock_data(ticker=ticker_JNJ)
df_K = load_stock_data(ticker=ticker_K)
df_PG = load_stock_data(ticker=ticker_PG)
df_WMT = load_stock_data(ticker=ticker_WMT)
df_JNJ[DIVIDEND_YIELD] = reinvestment_growth(df_JNJ)
df_K[DIVIDEND_YIELD] = reinvestment_growth(df_K)
df_PG[DIVIDEND_YIELD] = reinvestment_growth(df_PG)
df_WMT[DIVIDEND_YIELD] = reinvestment_growth(df_WMT)
class ForecastModel:
Mathematical model used to forecast long-term stock returns.
def __init__(self, dividend_yield, sales_growth,
psales, years):
Create a new model and fit it with the given data.
:param dividend_yield: Array with dividend yields.
:param sales_growth: Array with one-year sales growth.
:param psales: Array with P/Sales ratios.
:param years: Number of years.
# Copy args to self.
# Note the +1 for dividend yield and sales-growth
# so we don't have to do it several times below.
self.dividend_yield = np.array(dividend_yield) + 1
self.sales_growth = np.array(sales_growth) + 1
self.psales = psales
self.years = years
# Calculate the `a` parameter for the mean ann.return.
self.a = self.mean_parameter()
# Calculate the `b` parameter for the std.dev. ann.return.
self.b = self.std_parameter()
def forecast(self, psales_t):
Use the fitted model to forecast the mean and std.dev.
for the future stock returns.
:param psales_t: Array with different P/Sales ratios at buy-time.
:return: Two arrays with mean and std.dev. of the ann. returns
for each of the psales_t values.
# Annualized psales_t which is used in both formulas.
psales_t_ann = psales_t ** (1/self.years)
# Forecast the mean and std.dev. for the ann. returns
# for the different choices of P/Sales ratios at the
# time of buying the stock.
mean = self.a / psales_t_ann - 1.0
std = self.b / psales_t_ann
return mean, std
def mean_parameter(self):
Estimate the parameter `a` used in the formula for the
mean annualized return, given arrays with distributions
for the dividend yield, sales-growth and P/Sales.
:return: The parameter `a` for the mean return formula.
# We assume dividend_yield and sales_growth is already +1.
a = np.mean(self.dividend_yield) \
* np.mean(self.sales_growth) \
* np.mean(self.psales ** (1/self.years))
return a
def std_parameter(self, num_samples=10000):
Estimate the parameter `b` used in the formula for the
std.dev. annualized return, given arrays with distributions
for the dividend yield, sales-growth and P/Sales.
This is estimated using Monte Carlo simulation / resampling
of the given data, which is assumed to be independent of
each other and over time.
:param num_samples: Number of Monte Carlo samples.
:return: The parameter `b` for the std.dev. return formula.
# We will do a Monte Carlo simulation / resampling
# from the supplied arrays of data. For each year
# we take e.g. 10k random samples and then we
# calculate the annualized growth-rates. This gives
# us different values for dividend yields and sales-growth
# for each year, instead of just taking one random
# number and using that for all the years.
# Shape of arrays to sample.
shape = (num_samples, self.years)
# Sample the dividend yield. We assume it is already +1.
dividend_yield_sample = np.random.choice(self.dividend_yield, size=shape)
# Compound the growth through the years.
dividend_yield_sample = np.prod(dividend_yield_sample, axis=1)
# Sample the sales-growth. We assume it is already +1.
sales_growth_sample = np.random.choice(self.sales_growth, size=shape)
# Compound the growth through the years.
sales_growth_sample = np.prod(sales_growth_sample, axis=1)
# Sample the P/Sales ratio at the time of selling.
psales_sample = np.random.choice(self.psales, size=num_samples)
# Combine the three samples.
combined_sample = dividend_yield_sample * sales_growth_sample * psales_sample
# Calculate the `b` parameter.
b = np.std(combined_sample ** (1/self.years))
return b
def _ttest(self, err_forecast, err_baseline):
Perform a t-test on the residual errors of the
forecasting model and the baseline to assess whether
their means are equal.
When the resulting p_value is close to zero, the means
are unlikely to be equal.
:param err_forecast:
Residual errors for the forecasting model.
:param err_baseline:
Residual errors for the baseline.
:return:
p_value
if True: # Toggle: paired t-test (True) vs. un-paired / independent t-test (False).
# Paired t-test.
t_value, p_value = ttest_rel(a=err_forecast, b=err_baseline)
else:
# Un-paired / independent t-test.
t_value, p_value = ttest_ind(a=err_forecast, b=err_baseline, equal_var=False)
return p_value
def MSE(self, psales_t, ann_rets):
Calculates the Mean Squared Error (MSE) between the
model's forecasted mean and the observed annualized returns.
Also calculates the MSE between the baseline and the
observed annualized returns.
Also calculates the p-value that the forecasted and
baseline MSE values are equal.
:param psales_t:
Array with different P/Sales ratios at buy-time.
:param ann_rets:
Array with the corresponding annualized returns.
:return:
mse_forecast: MSE between model's forecast and actual returns.
mse_baseline: MSE between baseline and actual returns.
p_value: p-value whether the two MSE values are equal.
# Forecast the mean and std.dev. for the stock returns,
# from the historical P/Sales ratios.
mean_forecast, std_forecast = self.forecast(psales_t=psales_t)
# Errors between observed data and forecasting model.
err_forecast = (ann_rets - mean_forecast) ** 2
# Baseline errors between observed data and its mean.
err_baseline = (ann_rets - np.mean(ann_rets)) ** 2
# Mean Squared Errors (MSE).
mse_forecast = np.mean(err_forecast)
mse_baseline = np.mean(err_baseline)
# Hypothesis test whether the two MSE values are equal.
p_value = self._ttest(err_forecast=err_forecast,
err_baseline=err_baseline)
return mse_forecast, mse_baseline, p_value
def MAE(self, psales_t, ann_rets):
Calculates the Mean Absolute Error (MAE) between the
model's forecasted mean and the observed annualized returns.
Also calculates the MAE between the baseline and the
observed annualized returns.
Also calculates the p-value that the forecasted and
baseline MAE values are equal.
:param psales_t:
Array with different P/Sales ratios at buy-time.
:param ann_rets:
Array with the corresponding annualized returns.
:return:
mae_forecast: MAE between model's forecast and actual returns.
mae_baseline: MAE between baseline and actual returns.
p_value: p-value whether the two MAE values are equal.
# Forecast the mean and std.dev. for the stock returns,
# from the historical P/Sales ratios.
mean_forecast, std_forecast = self.forecast(psales_t=psales_t)
# Errors between observed data and forecasting model.
err_forecast = np.abs(ann_rets - mean_forecast)
# Baseline errors between observed data and its mean.
err_baseline = np.abs(ann_rets - np.mean(ann_rets))
# Mean Absolute Errors (MAE).
mae_forecast = np.mean(err_forecast)
mae_baseline = np.mean(err_baseline)
# Hypothesis test whether the two MAE values are equal.
p_value = self._ttest(err_forecast=err_forecast,
err_baseline=err_baseline)
return mae_forecast, mae_baseline, p_value
def MAZ(self, psales_t, ann_rets):
Calculates the Mean Absolute Z-Score (MAZ) between the
model's forecasted mean and the observed annualized returns.
The MAZ is the average distance between the forecasted
mean and the observed annualized returns, measured in
terms of the forecasted std.dev.
Also calculates the MAZ between the baseline and the
observed annualized returns.
Also calculates the p-value that the forecasted and
baseline MAZ values are equal.
:param psales_t:
Array with different P/Sales ratios at buy-time.
:param ann_rets:
Array with the corresponding annualized returns.
:return:
maz_forecast: MAZ between model's forecast and actual returns.
maz_baseline: MAZ between baseline and actual returns.
p_value: p-value whether the two MAZ values are equal.
# Forecast the mean and std.dev. for the stock returns,
# from the historical P/Sales ratios.
mean_forecast, std_forecast = self.forecast(psales_t=psales_t)
# Errors between observed data and forecasting model.
err_forecast = np.abs(ann_rets - mean_forecast) / std_forecast
# Baseline errors between observed data and its mean.
err_baseline = np.abs(ann_rets - np.mean(ann_rets)) / np.std(ann_rets)
# Mean Absolute Z-Scores (MAZ).
maz_forecast = np.mean(err_forecast)
maz_baseline = np.mean(err_baseline)
# Hypothesis test whether the two MAZ values are equal.
p_value = self._ttest(err_forecast=err_forecast,
err_baseline=err_baseline)
return maz_forecast, maz_baseline, p_value
def R_squared(self, psales_t, ann_rets):
        """
        Calculate the Coefficient of Determination R^2 for
        measuring the Goodness of Fit between the forecasted
        mean and the observed annualized returns.
        An R^2 value of one means there is a perfect fit and
        the forecasting model explains all of the variance
        in the data. An R^2 value of zero means the forecasting
        model does not explain any of the variance in the data.
        Note that because the forecasting model is non-linear,
        the R^2 can become negative if the model fits poorly
        on data with a large variance.
        :param psales_t:
            Array with different P/Sales ratios at buy-time.
        :param ann_rets:
            Array with the corresponding annualized returns.
        :return:
            R^2 value.
        """
# Forecast the mean and std.dev. for the stock returns,
# from the historical P/Sales ratios.
mean_forecast, std_forecast = self.forecast(psales_t=psales_t)
# Errors between observed data and forecasting model.
err_forecast = (ann_rets - mean_forecast) ** 2
# Baseline errors between observed data and its mean.
err_baseline = (ann_rets - np.mean(ann_rets)) ** 2
# Sum of Squared Errors (SSE) for the forecasting model.
sse = np.sum(err_forecast)
# Total Sum of Squares (SST) for the baseline.
sst = np.sum(err_baseline)
# The R^2 value.
R_squared = 1.0 - sse / sst
return R_squared
def print_statistics(model, psales_t, ann_rets):
    """
    Calculate and print the Goodness of Fit statistics
    for a model's forecast compared to the baseline.
    The p-value results from a paired t-test whether
    the values are equal. A p-value close to zero means
    that the values are unlikely to be equal.
    :param model:
        Instance of the ForecastModel class.
    :param psales_t:
        Array with different P/Sales ratios at buy-time.
    :param ann_rets:
        Array with the corresponding annualized returns.
    """
# Print header.
print("\tForecast\tBaseline\tp-value")
print("=================================================")
# Mean Absolute Error (MAE).
mae_forecast, mae_baseline, p_value = model.MAE(psales_t=psales_t,
ann_rets=ann_rets)
msg = "MAE:\t{0:.1%}\t\t{1:.1%}\t\t{2:.2e}"
msg = msg.format(mae_forecast, mae_baseline, p_value)
print(msg)
# Mean Squared Error (MSE).
mse_forecast, mse_baseline, p_value = model.MSE(psales_t=psales_t,
ann_rets=ann_rets)
msg = "MSE:\t{0:.2e}\t{1:.2e}\t{2:.2e}"
msg = msg.format(mse_forecast, mse_baseline, p_value)
print(msg)
# Mean Absolute Z-Score (MAZ).
maz_forecast, maz_baseline, p_value = model.MAZ(psales_t=psales_t,
ann_rets=ann_rets)
msg = "MAZ:\t{0:.2f}\t\t{1:.2f}\t\t{2:.2e}"
msg = msg.format(maz_forecast, maz_baseline, p_value)
print(msg)
# R^2.
R_squared = model.R_squared(psales_t=psales_t,
ann_rets=ann_rets)
msg = "R^2:\t{0:.2f}"
msg = msg.format(R_squared)
print(msg)
def plot_ann_returns(ticker, df, years,
dividend_yield=None, sales_growth=None,
psales=None,
psales_min=None, psales_max=None):
    """
    Create a plot with the actual historical returns showing
    the P/Sales ratios vs. future Annualized Returns. Overlay
    this plot with the estimated mean and std.dev. from the
    mathematical forecasting model we have derived above.
    The optional params are taken from the DataFrame `df`
    if not supplied. This allows you to override some or
    all of the data used in the forecasting model e.g.
    to change assumptions about future sales-growth.
    :param ticker: String with ticker for the stock or index.
    :param df: Pandas DataFrame.
    :param years: Number of investment years.
    :param dividend_yield: (Optional) Array with dividend yields.
    :param sales_growth: (Optional) Array with one-year sales growth.
    :param psales: (Optional) Array with P/Sales ratios.
    :param psales_min: (Optional) Min P/Sales for plotting curves.
    :param psales_max: (Optional) Max P/Sales for plotting curves.
    :return: None.
    """
# Create a single plot.
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(211)
# Select only the data we need.
df2 = df[[TOTAL_RETURN, DIVIDEND_YIELD, SALES_GROWTH, PSALES]]
# Remove rows for which there is missing data.
df2 = df2.dropna()
# Get Dividend Yield if none provided.
if dividend_yield is None:
dividend_yield = df2[DIVIDEND_YIELD]
# Get Sales Growth if none provided.
if sales_growth is None:
sales_growth = df2[SALES_GROWTH]
# Get P/Sales if none provided.
if psales is None:
psales = df2[PSALES]
# Get min P/Sales for plotting if none provided.
if psales_min is None:
psales_min = np.min(psales)
# Get max P/Sales for plotting if none provided.
if psales_max is None:
psales_max = np.max(psales)
# Create the forecasting model and fit it to the data.
model = ForecastModel(dividend_yield=dividend_yield,
sales_growth=sales_growth,
psales=psales, years=years)
# Evenly spaced P/Sales ratios between historic min and max.
psales_t = np.linspace(start=psales_min, stop=psales_max, num=100)
# Use the model to forecast the mean and std ann.returns.
mean, std = model.forecast(psales_t=psales_t)
# Plot the mean ann.return.
ax.plot(psales_t, mean, color="black", label="Forecast Mean")
# Plot one standard deviation.
color = "green"
alpha = 0.3
# Plot lines below and above mean.
ax.plot(psales_t, mean-std, color=color,
label="Forecast Mean $\pm$ 1 Std.Dev.")
ax.plot(psales_t, mean+std, color=color)
# Fill the areas.
ax.fill_between(psales_t, mean+std, mean-std,
color=color, edgecolor=color, alpha=alpha)
# Plot two standard deviations.
color = "red"
alpha = 0.1
# Plot lines below and above mean.
ax.plot(psales_t, mean-2*std, color=color,
label="Forecast Mean $\pm$ 2 Std.Dev.")
ax.plot(psales_t, mean+2*std, color=color)
# Fill the areas.
ax.fill_between(psales_t, mean-std, mean-2*std,
color=color, edgecolor=color, alpha=alpha)
ax.fill_between(psales_t, mean+std, mean+2*std,
color=color, edgecolor=color, alpha=alpha)
# Get the actual ann.returns from the historic data.
x, y = prepare_ann_returns(df=df2, years=years, key=PSALES)
# Scatter-plot with the actual P/Sales vs. Ann.Returns.
ax.scatter(x, y, label="Actual Returns")
# Plot baseline mean as horizontal dashed line.
ax.axhline(y=np.mean(y), color="black", linestyle="--",
label="Baseline")
# Show the labels for what we have just plotted.
ax.legend()
# Create plot-title.
# First part of the title.
title1 = "[{0}] {1}-Year Ann. Return".format(ticker, years)
# Second part of the title. Formula for mean ann. return.
msg = "E[Ann Return] = {0:.2f} / (P/Sales ^ (1/{1})) - 1"
title2 = msg.format(model.a, years)
# Third part of the title. Formula for std.dev. ann. return.
msg = "Std[Ann Return] = {0:.3f} / (P/Sales ^ (1/{1}))"
title3 = msg.format(model.b, years)
# Combine and set the plot-title.
title = "\n".join([title1, title2, title3])
ax.set_title(title)
# Convert y-ticks to percentages.
formatter = FuncFormatter(lambda y, _: '{:.0%}'.format(y))
ax.yaxis.set_major_formatter(formatter)
# Set axes labels.
ax.set_xlabel("P/Sales")
ax.set_ylabel("Annualized Return")
# Show grid.
ax.grid()
# Show the plot.
plt.show()
# Print the Goodness of Fit statistics.
print_statistics(model=model, psales_t=x, ann_rets=y)
plot_ann_returns(years=1, ticker=ticker_SP500, df=df_SP500)
plot_ann_returns(years=4, ticker=ticker_SP500, df=df_SP500)
plot_ann_returns(years=10, ticker=ticker_SP500, df=df_SP500)
plot_ann_returns(df=df_PG, ticker=ticker_PG, years=1)
plot_ann_returns(df=df_PG, ticker=ticker_PG, years=2)
plot_ann_returns(df=df_PG, ticker=ticker_PG, years=5)
plot_ann_returns(df=df_PG, ticker=ticker_PG, years=10)
plot_ann_returns(ticker=ticker_WMT, df=df_WMT, years=1)
plot_ann_returns(ticker=ticker_WMT, df=df_WMT, years=3)
plot_ann_returns(ticker=ticker_WMT, df=df_WMT, years=10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step12: Mathematical Model
Step14: Print Statistics
Step16: Plotting Function
Step17: Case Study
Step18: The statistics above show that the forecasting model fits the historical data much worse than the baseline average. The MSE can be a bit hard to interpret, but the MAE shows us that on average, the absolute error of the forecasting model is 28.5% while it is only 12.2% for the baseline. The p-value is practically zero, which means this difference did not happen by random chance (a minimal sketch of such a paired t-test follows this list). We can therefore conclude that the forecasting model fits the historical data much worse than the average return for 1-year investment periods of the S&P 500.
Step19: The above plot suggests that the forecasting model is a better fit than the baseline, but we would like to quantify exactly how much better, which is shown by the above statistics. The MAE is 6.5% for the forecasting model while it is 8.5% for the baseline. So the forecasting model has a better fit with the absolute error being about 2 percentage points on average. The p-value is practically zero, which means that it is extremely unlikely this happened by random chance. However, the $R^2$ is 0.41 which means that most of the variance in the data is not explained by the forecasting model.
Step20: The above statistics show that the forecasting model has a MAE of 2.1% which is significantly lower than the baseline's MAE of 3.5%. The p-value is nearly zero so this difference is not due to random chance. The $R^2$ is 0.75 which means that a significant part of the data's variance is now explained by the forecasting model.
Step21: The above statistics show us that the forecasting model's mean is actually slightly worse than the baseline. The forecasting model has a MAE of 16.0% while the baseline has a MAE of 14.4%. The p-value is nearly zero so this difference is not due to random chance.
Step22: The above statistics show that the MAE is 6.9% for the forecasting model while it is much higher at 11.2% for the baseline. The p-value is nearly zero so this difference is not due to random chance. The $R^2$ value is 0.70 which means that a significant part of the data's variance is explained by the forecasting model. This is quite remarkable for such a short investment period.
Step23: The statistics above show that the MAE is 3.1% for the forecasting model, while it is 4.6% for the baseline. The p-value is nearly zero so this difference is not due to random chance.
Step24: The statistics above show that the MAE is only 1.2% for the forecasting model, while it is 2.5 times as high for the baseline at 3.0%. The p-value is practically zero so this difference is not due to random chance. The MAZ for the baseline model is also about twice as high compared to the forecasting model. The $R^2$ is 0.83 which means that a substantial part of the data's variance is explained by the forecasting model.
Step25: The statistics above show a very poor fit of the forecasting model with a MAE of 58.1% while the baseline has a MAE of 30.2%. The p-value is practically zero so this difference is not due to random chance. Also note that the MAZ is higher for the forecasting model, and that the $R^2$ is negative because the data has very high variance and the forecasting model fits the data so poorly.
Step26: The statistics above show that the MSE and MAE are very close for the forecasting model and baseline. The MSE is slightly higher for the forecasting model and the p-value is 0.0138 which means that the small difference might be due to random chance, although the probability of that is quite low.
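As a minimal sketch of the paired t-test behind these p-values (assuming the model's _ttest helper wraps something like scipy's paired test; the error arrays are illustrative):

import numpy as np
from scipy import stats

# Per-observation squared or absolute errors, paired on the same data points.
err_forecast = np.array([0.01, 0.03, 0.02, 0.05, 0.04])
err_baseline = np.array([0.06, 0.08, 0.05, 0.09, 0.07])

# Paired t-test on the differences; a p-value near zero means the two
# mean errors are very unlikely to be equal by random chance.
t_stat, p_value = stats.ttest_rel(err_forecast, err_baseline)
print(t_stat, p_value)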
|
6,288
|
<ASSISTANT_TASK:>
Python Code:
%lsmagic
%pwd
%pwd?
%%javascript
IPython.toolbar.add_button_group([
{
'label':'renumber all code cells',
'icon':'icon-list-ol',
'callback':function() {
var cells = IPython.notebook.get_cells();
cells = cells.filter(function(c)
{
return c instanceof IPython.CodeCell;
})
for (var i = 0; i < cells.length; i++) {
cells[i].set_input_prompt(i+1);
}
}
}]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check the current directory
Step2: Append ? to a command to view its documentation (a programmatic equivalent is sketched after this list)
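As a hedged sketch (this assumes it runs inside IPython/Jupyter, where get_ipython() is available; outside IPython it returns None), the same magics can be invoked programmatically:

from IPython import get_ipython

ip = get_ipython()
ip.run_line_magic('pwd', '')       # same as %pwd
ip.run_line_magic('pinfo', 'len')  # same as len? -- shows the docstring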
|
6,289
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from bigbang.archive import Archive
from bigbang.archive import load as load_archive
from bigbang.thread import Thread
from bigbang.thread import Node
from bigbang.utils import remove_quoted
import matplotlib.pyplot as plt
import datetime
import csv
from collections import defaultdict
#insert one URL related to the mailing list of interest
url = "http://mm.icann.org/pipermail/wp4/"
try:
arch_path = '../archives/'+url[:-1].replace('://','_/')+'.csv'
arx = load_archive(arch_path)
except:
arch_path = '../archives/'+url[:-1].replace('//','/')+'.csv'
print url
arx = load_archive(arch_path)
print len(arx.get_threads())
n = [t.get_num_people() for t in arx.get_threads()]
plt.hist(n, bins = 20)
plt.xlabel('number of email-address in a thread')
plt.show()
y = [t.get_duration().days for t in arx.get_threads()]
plt.hist(y, bins = (10))
plt.xlabel('duration of a thread(days)')
plt.show()
#Insert the participation threshold (number of people)
#(for no threeshold: 'min_participation = 0')
min_participation = 0
#Insert the duration threshold (number of days)
#(for no threeshold: 'min_duration = 0')
min_duration = 0
#Insert the directory path where to save the files
path = 'c:/users/davide/bigbang/'
i = 0
for thread in arx.get_threads():
if thread.get_num_people() >= min_participation and thread.get_duration().days >= min_duration:
i += 1
f = open(path+'thread_'+str(i)+'.csv', "wb")
f_w = csv.writer(f)
f_w.writerow(thread.get_content())
f.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, collect data from a public email archive.
Step2: Let's check the number of threads in this mailing list corpus
Step3: We can plot the number of people participating in each thread.
Step4: The duration of a thread is the amount of elapsed time between its first and last message (a tiny sketch of such a duration calculation follows this list).
Step5: Export the content of each thread into a .csv file (named thread_1.csv, thread_2.csv, and so on).
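As a tiny illustration of the duration calculation from Step4 (the dates are made up):

import datetime

first_message = datetime.datetime(2015, 3, 1, 9, 30)
last_message = datetime.datetime(2015, 3, 5, 17, 0)

# Elapsed time between first and last message, as used in the histogram above.
duration = last_message - first_message
print(duration.days)  # 4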
|
6,290
|
<ASSISTANT_TASK:>
Python Code:
from nilmtk import DataSet
iawe = DataSet('/data/iawe.h5')
elec = iawe.buildings[1].elec
elec
fridge = elec['fridge']
fridge.available_columns()
df = next(fridge.load())
df.head()
series = next(fridge.power_series())
series.head()
series = next(fridge.power_series(ac_type='reactive'))
series.head()
df = next(fridge.load(physical_quantity='power', ac_type='reactive'))
df.head()
df = next(fridge.load(physical_quantity='voltage'))
df.head()
df = next(fridge.load(physical_quantity = 'power'))
df.head()
df = next(fridge.load(ac_type='active'))
df.head()
# resample to minutely (i.e. with a sample period of 60 secs)
df = next(fridge.load(ac_type='active', sample_period=60))
df.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us see what measurements we have for the fridge
Step2: Loading data
Step3: Load a single column of power data
Step4: or, to get reactive power
Step5: Specify physical_quantity or AC type
Step6: To load voltage data
Step7: Loading by specifying AC type
Step8: Loading by resampling to a specified period
|
6,291
|
<ASSISTANT_TASK:>
Python Code:
import requests
r = requests.get("http://en.wikipedia.org/wiki/Main_Page")
type(r.request), type(r.content), type(r.headers)
from pprint import pprint
pprint(r.content[0:1000])
r.request.headers
r.headers
r.content[:1000]
r.text[:1000]
from bs4 import BeautifulSoup
page = BeautifulSoup(r.content, 'html.parser')  # explicit parser avoids a bs4 warning
page
page.p
page.p.a
page.p.findAll('a')
for element in page.p.children:
print(element)
for element in page.p.descendants:
print(element)
for link in page.find_all('a'):
    # the class attribute is a list in bs4, so test membership rather than equality
    if 'mw-redirect' not in (link.attrs.get('class') or []):
        print(link.find_next())
import json
with open('../data/02_tweet.json','r') as f:
a_tweet = json.loads(f.read())
from pprint import pprint
pprint(a_tweet)
import yaml
with open('../etc/creds.yml', 'r') as f:
creds = yaml.safe_load(f)
from requests_oauthlib import OAuth1Session
twitter = OAuth1Session(**creds)
search = "https://api.twitter.com/1.1/search/tweets.json"
r = twitter.get(search, params={'q' : 'technology'})
r.ok
r.status_code, r.reason
r.content
r.json()
data = r.json()
data['search_metadata']
statuses = data['statuses']
statuses[0]
r = twitter.get(search, params={
'q' : 'happy',
'geocode' : '37.8734855,-122.2597169,10mi'
})
r.ok
statuses = r.json()['statuses']
statuses[0]
with open('my_tweets.json', 'w') as f:
json.dump(statuses, f)
post = "https://api.twitter.com/1.1/statuses/update.json"
r = twitter.post(post, params={
'status' : "I stole Juan's Twitter credentials"
})
r.ok
import time
def retweet():
    r = twitter.get(search, params={'q': 'DLabAtBerkeley'})
    if r.ok:
        statuses = r.json()['statuses']
        for update in statuses:
            # 'update' is the loop variable, so use it (not 'item') here
            username = update['user']['screen_name']
            parameters = {'status': 'HOORAY! @' + username}
            r = twitter.post(post, params=parameters)
            print(r.status_code, r.reason)
            time.sleep(5)
with open('../etc/crontab_example', 'r') as f:
print(f.read())
with open('../etc/crond_example', 'r') as f:
print(f.read())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This response object contains various information about the request you sent to the server, the resources returned, and information about the response the server returned to you, among other information. These are accessible through the <i>request</i> attribute, the <i>content</i> attribute and the <i>headers</i> attribute respectively, which we'll each examine below.
Step2: Here, we can see that request is an object with a custom type, content is a str value and headers is an object with "dict" in its name, suggesting we can interact with it like we would with a dictionary.
Step3: The content returned is written in HTML (__H__yper__T__ext __M__arkup __L__anguage), which is the default format in which web pages are returned. The content looks like gibberish at first, with little to no spacing. The reason for this is that some of the formatting rules for the document, like its hierarchical structure, are saved in text along with the text in the document.
Step4: Along with the additional info that Wikipedia sent back
Step5: But you will probably not ever need this information.
Step6: which gives us the response in bytes, or text
Step7: Parsing HTML in Python
Step8: Beautiful Soup creates a linked tree, where the root of the tree is the whole HTML document. It has children, which are all the elements of the HTML document. Each of those has children, which are any elements they have. Each element of the tree is aware of its parent and children.
Step9: This is grabbing the paragraph tag from the page. If we want the first link from the first paragraph, we can try
Step10: But what if we want all the links? We are going to use a method of bs4's elements called find_all.
Step11: What if you want all the elements in that paragraph, and not just the links? bs4 has an iterator for children
Step12: HTML elements can be nested, but children only iterates at one level below the element. If you want everything, you can iterate with descendants
Step13: This splits out formatting tags that we probably don't care about, like bold-faced text, and so we probably won't use it again.
Step14: Time for a challenge!
Step15: We can take a quick look at the structure by pretty printing it
Step16: Time for a challenge!
Step17: We're going to load these credentials into a requests module specifically designed for handling the flavor of authentication management that Twitter uses.
Step18: That ** syntax we just used is called a "double splat" and is a Python convenience for converting the key-value pairs of a dictionary into keyword-argument pairs to pass to a function (a tiny sketch follows this list).
Step19: This has returned an http response object, which contains data like whether or not the request succeeded
Step20: You can also get the http response code, and the reason why Twitter sent you that code (these are all super important for controlling the flow of your program).
Step21: The data that we asked Twitter to send us is in r.content
Step22: But that's not helpful. We can extract it in python's representation of json with the json method
Step23: This has some helpful metadata about our request, like a url where we can get the next batch of results from Twitter for the same query
Step24: The tweets that we want are under the key "statuses"
Step25: This is one tweet.
Step26: If we want to store this data somewhere, we can output it as json using the json library from above. However, if you're doing a lot of these, you'll probaby want to use a database to handle everything.
Step27: To post tweets, we need to use a different endpoint
Step28: And now we can pass a new tweet (remember, Twitter calls these 'statuses') as a parameter to our post request.
Step29: Other (optional) parameters include things like location, and replies.
Step30: But you are a human that needs to eat, sleep, and be social with other humans. Luckily, Linux systems have a time-based daemon called cron that will run scripts like this for you.
Step31: This is telling cron to print that statement to a file called "dumblog" at 8am every Monday.
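A tiny sketch of the double splat from Step18 (the function and dict are illustrative):

def greet(name, greeting):
    print(greeting + ', ' + name + '!')

kwargs = {'name': 'Ada', 'greeting': 'Hello'}

# ** unpacks the dict into keyword arguments:
# equivalent to greet(name='Ada', greeting='Hello').
greet(**kwargs)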
|
6,292
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from sklearn import linear_model
import matplotlib.pyplot as plt
# read data in pandas frame
dataframe = pd.read_csv('datasets/house_dataset1.csv')
# assign x and y
x_feature = dataframe[['Size']]
y_labels = dataframe[['Price']]
# check data by printing first few rows
dataframe.head()
#visualize results
plt.scatter(x_feature, y_labels)
plt.show()
y_labels.shape
#train model on data
body_reg = linear_model.LinearRegression()
body_reg.fit(x_feature, y_labels)
print ('theta0 = ',body_reg.intercept_)
print ('theta1 = ',body_reg.coef_)
hx = body_reg.predict(x_feature)
plt.scatter(x_feature, y_labels)
plt.plot(x_feature, hx)
plt.show()
theta0 = 0
theta1 = 0
inc = 1.0
#loop over all values of theta1 from 0 to 1000 with an increment of inc and find cost.
# The one with minimum cost is the answer.
m = x_feature.shape[0]
n = x_feature.shape[1]
# optimal values to be determined
minCost = 100000000000000
optimal_theta = 0
while theta1 < 1000:
cost = 0;
for indx in range(m):
hx = theta1*x_feature.values[indx,0] + theta0
cost += pow((hx - y_labels.values[indx,0]),2)
cost = cost/(2*m)
# print(theta1)
# print(cost)
if cost < minCost:
minCost = cost
optimal_theta = theta1
theta1 += inc
print ('theta0 = ', theta0)
print ('theta1 = ',optimal_theta)
hx = optimal_theta*x_feature
plt.scatter(x_feature, y_labels)
plt.plot(x_feature, hx)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot data
Step2: Train model
Step3: Predict output using trained model
Step4: Plot results
Step5: Do it yourself (a closed-form least-squares alternative is sketched after this list)
Step6: Predict labels using model and print it
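As an aside to the brute-force search above: ordinary least squares also has a closed form. A minimal numpy sketch (x and y are illustrative, not the house dataset):

import numpy as np

x = np.array([50.0, 80.0, 120.0, 160.0, 200.0])    # sizes (illustrative)
y = np.array([100.0, 170.0, 250.0, 330.0, 410.0])  # prices (illustrative)

# Design matrix with a column of ones for the intercept theta0.
X = np.column_stack([np.ones_like(x), x])

# Solve min ||X @ theta - y||^2 directly.
theta, *_ = np.linalg.lstsq(X, y, rcond=None)
print(theta)  # [theta0, theta1]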
|
6,293
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns; sns.set()
%matplotlib inline
import keras
from keras.models import Sequential, Model
from keras.layers import Dense
from keras.optimizers import Adam
import salty
from numpy import array
from numpy import argmax
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import numpy as np
from sklearn.model_selection import train_test_split
from random import shuffle
import pandas as pd
df = pd.read_csv('../../../../../../../GDB17.50000000', names=['smiles'])
df = df[df['smiles'].str.contains("N+", regex=False)]
values = df['smiles']
print(values.shape)
smile_max_length = values.map(len).max()
print(smile_max_length)
plt.hist(values.map(len))
def pad_smiles(smiles_string, smile_max_length):
if len(smiles_string) < smile_max_length:
return smiles_string + " " * (smile_max_length - len(smiles_string))
padded_smiles = [pad_smiles(i, smile_max_length) for i in values if pad_smiles(i, smile_max_length)]
shuffle(padded_smiles)
def create_char_list(char_set, smile_series):
for smile in smile_series:
char_set.update(set(smile))
return char_set
char_set = set()
char_set = create_char_list(char_set, padded_smiles)
print(len(char_set))
char_set
char_list = list(char_set)
chars_in_dict = len(char_list)
char_to_index = dict((c, i) for i, c in enumerate(char_list))
index_to_char = dict((i, c) for i, c in enumerate(char_list))
char_to_index
X_train = np.zeros((len(padded_smiles), smile_max_length, chars_in_dict), dtype=np.float32)
X_train.shape
for i, smile in enumerate(padded_smiles):
for j, char in enumerate(smile):
X_train[i, j, char_to_index[char]] = 1
X_train, X_test = train_test_split(X_train, test_size=0.33, random_state=42)
X_train.shape
# need to build RNN to encode. some issues include what the 'embedded dimension' is (vector length of embedded sequence)
from keras import backend as K
from keras.objectives import binary_crossentropy #objs or losses
from keras.models import Model
from keras.layers import Input, Dense, Lambda
from keras.layers.core import Dense, Activation, Flatten, RepeatVector
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import GRU
from keras.layers.convolutional import Convolution1D
def Encoder(x, latent_rep_size, smile_max_length, epsilon_std = 0.01):
h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
h = Flatten(name = 'flatten_1')(h)
h = Dense(435, activation = 'relu', name = 'dense_1')(h)
def sampling(args):
z_mean_, z_log_var_ = args
batch_size = K.shape(z_mean_)[0]
epsilon = K.random_normal(shape=(batch_size, latent_rep_size),
mean=0., stddev = epsilon_std)
return z_mean_ + K.exp(z_log_var_ / 2) * epsilon
z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)
def vae_loss(x, x_decoded_mean):
x = K.flatten(x)
x_decoded_mean = K.flatten(x_decoded_mean)
xent_loss = smile_max_length * binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - \
K.exp(z_log_var), axis = -1)
return xent_loss + kl_loss
return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,),
name='lambda')([z_mean, z_log_var]))
def Decoder(z, latent_rep_size, smile_max_length, charset_length):
h = Dense(latent_rep_size, name='latent_input', activation = 'relu')(z)
h = RepeatVector(smile_max_length, name='repeat_vector')(h)
h = GRU(501, return_sequences = True, name='gru_1')(h)
h = GRU(501, return_sequences = True, name='gru_2')(h)
h = GRU(501, return_sequences = True, name='gru_3')(h)
return TimeDistributed(Dense(charset_length, activation='softmax'),
name='decoded_mean')(h)
x = Input(shape=(smile_max_length, len(char_set)))
_, z = Encoder(x, latent_rep_size=292, smile_max_length=smile_max_length)
encoder = Model(x, z)
encoded_input = Input(shape=(292,))
decoder = Model(encoded_input, Decoder(encoded_input, latent_rep_size=292,
smile_max_length=smile_max_length,
charset_length=len(char_set)))
x1 = Input(shape=(smile_max_length, len(char_set)), name='input_1')
vae_loss, z1 = Encoder(x1, latent_rep_size=292, smile_max_length=smile_max_length)
autoencoder = Model(x1, Decoder(z1, latent_rep_size=292,
smile_max_length=smile_max_length,
charset_length=len(char_set)))
autoencoder.compile(optimizer='Adam', loss=vae_loss, metrics =['accuracy'])
autoencoder.fit(X_train, X_train, shuffle = True, validation_data=(X_test, X_test))
def sample(a, temperature=1.0):
# helper function to sample an index from a probability array
# a = np.log(a) / temperature
# a = np.exp(a) / np.sum(np.exp(a))
# return np.argmax(np.random.multinomial(1, a, 1))
# work around from https://github.com/llSourcell/How-to-Generate-Music-Demo/issues/4
a = np.log(a) / temperature
dist = np.exp(a)/np.sum(np.exp(a))
choices = range(len(a))
return np.random.choice(choices, p=dist)
values[393977]
test_smi = values[393977]
test_smi = pad_smiles(test_smi, smile_max_length)
Z = np.zeros((1, smile_max_length, len(char_list)), dtype=np.bool)
for t, char in enumerate(test_smi):
Z[0, t, char_to_index[char]] = 1
string = ""
for i in autoencoder.predict(Z):
for j in i:
index = sample(j)
string += index_to_char[index]
print("\n callback guess: " + string)
properties = ['density', 'cpt', 'viscosity', 'thermal_conductivity',
'melting_point']
props = properties
devmodel = salty.aggregate_data(props, merge='Union')
devmodel.Data['smiles_string'] = devmodel.Data['smiles-cation']
cations = devmodel.Data['smiles_string'].drop_duplicates()
print(cations.shape)
cations = cations.reset_index(drop=True)
test_smi = cations[100]
test_smi = pad_smiles(test_smi, smile_max_length)
Z = np.zeros((1, smile_max_length, len(char_list)), dtype=np.bool)
for t, char in enumerate(test_smi):
Z[0, t, char_to_index[char]] = 1
test_smi
Z.shape
string = ""
for i in autoencoder.predict(Z):
for j in i:
index = sample(j)
string += index_to_char[index]
print("\n callback guess: " + string)
cations_with_proper_chars = []
for smi in cations:
if set(smi).issubset(char_list):
cations_with_proper_chars.append(smi)
len(cations_with_proper_chars)
cation_samples = []
for smi_index, smi in enumerate(cations_with_proper_chars):
smi = pad_smiles(smi, smile_max_length)
Z = np.zeros((1, smile_max_length, len(char_list)), dtype=np.bool)
for t, char in enumerate(smi):
Z[0, t, char_to_index[char]] = 1
string = ""
for i in autoencoder.predict(Z):
for j in i:
index = sample(j, temperature=0.5)
string += index_to_char[index]
cation_samples.append(string)
print('sampled cations: {}'.format(len(cation_samples)))
print('unique samples: {}'.format(pd.DataFrame(cation_samples).drop_duplicates().shape[0]))
from rdkit import Chem
from rdkit.Chem import Draw
%matplotlib inline
for smi in cation_samples:
try:
Draw.MolToMPL(Chem.MolFromSmiles(smi))
print(smi)
except:
pass
cation_samples
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: only N+ contain positive charges in this dataset
Step2: We may want to remove cations with more than 25 heavy atoms
Step3: So, some Keras version housekeeping: Keras 2 stores its loss functions in keras.losses, while Keras 1 used keras.objectives. We'll just have to be consistent.
Step4: Here I've adapted the exact architecture used in the paper
Step5: encoded_input looks like a dummy layer here
Step6: create a separate autoencoder model that combines the encoder and decoder (I guess the former cells are for accessing those separate parts of the model)
Step7: we compile and fit
Step8: Alright. So now I'm going to loop through our 276 cations, sample 100x from the decoder based on these representations, and see how many sanitize with the RDKit (a minimal sanitization check is sketched after this list)
Step9: so we had to remove 25 cations
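A minimal sketch of the sanitization check referred to in Step8 (the candidate strings are illustrative):

from rdkit import Chem

candidates = ['CCO', 'c1ccccc1', 'C((C)']  # last one is deliberately invalid
# Chem.MolFromSmiles returns None when a SMILES string fails to sanitize.
valid = [smi for smi in candidates if Chem.MolFromSmiles(smi) is not None]
print(len(valid), 'of', len(candidates), 'sanitize')  # 2 of 3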
|
6,294
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import pytz
import inspect
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import tmpo
from opengrid import config
from opengrid.library import plotting
from opengrid.library import houseprint
c=config.Config()
%matplotlib inline
plt.rcParams['figure.figsize'] = 16,8
# path to data
path_to_data = c.get('data', 'folder')
if not os.path.exists(path_to_data):
raise IOError("Provide your path to the data in your config.ini file. This is a folder containing a 'zip' and 'csv' subfolder.")
hp = houseprint.Houseprint()
#set start and end date
end = pd.Timestamp('2015/1/4')
start = end - dt.timedelta(days=60)
hp.init_tmpo()
def dif_interp(ts, freq='min', start=None, end=None):
    """Return a fixed frequency discrete difference time series from an unevenly spaced cumulative time series."""
if ts.empty and (start is None or end is None):
return ts
start = start or ts.index[0]
start = start.replace(tzinfo=pytz.utc)
end = end or max(start, ts.index[-1])
end = end.replace(tzinfo=pytz.utc)
start = min(start, end)
newindex = pd.DataFrame([0, 0], index=[start, end]).resample(freq).index
if ts.dropna().empty:
tsmin = ts.reindex(newindex)
else:
tsmin = ts.reindex(ts.index + newindex)
tsmin = tsmin.interpolate(method='time')
tsmin = tsmin.reindex(newindex)
return tsmin.diff()*3600/60
df = hp.get_data(sensortype='water', head=start, tail=end).diff()
water_sensors = [sensor for sensor in hp.get_sensors('water') if sensor.key in df.columns]
print "{} water sensors".format(len(water_sensors))
for sensor in water_sensors:
ts = df[sensor.key]
if not ts.dropna().empty:
plotting.carpet(ts, title=sensor.device.key, zlabel=r'Flow [l/hour]')
for sensor in water_sensors:
ts = df[sensor.key]
if not ts.dropna().empty:
tsday = ts.resample('D', how='sum')
tsday.plot(label=sensor.device.key)
(tsday*0.+1000.).plot(style='--', lw=3, label='_nolegend_')
plt.legend()
for sensor in water_sensors:
ts = df[sensor.key]
if not ts.dropna().empty:
plt.figure()
for day in pd.date_range(start, end):
try:
tsday = ts[day.strftime('%Y/%m/%d')].order(ascending=False) * 60.
plt.plot(tsday.values/60.)
x = np.arange(len(tsday.values)) + 10.
plt.plot(x + 100., 500./x**1.5, 'k--')
plt.gca().set_yscale('log')
plt.ylim(ymin=1/60.)
plt.title(sensor.device.key)
except:
pass
for sensor in water_sensors:
ts = df[sensor.key] * 60.
if not ts.dropna().empty:
tsday = pd.rolling_min(ts, 60)
ax = tsday.plot(label=sensor.device.key)
(tsday*0.+20.).plot(style='--', lw=3, label='_nolegend_')
plt.gca().set_yscale('log')
ax.set_ylim(ymin=1)
plt.legend()
for sensor in water_sensors:
ts = df[sensor.key] * 60.
if not ts.dropna().empty:
tsday = pd.rolling_min(ts, 60) - 0.4*pd.rolling_mean(ts, 60)
ax = tsday.plot(label=sensor.device.key)
(tsday*0.+1.).plot(style='--', lw=3, label='_nolegend_')
ax.set_yscale('log')
ax.set_ylim(ymin=0.1)
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This notebook shows step by step how water leaks of different severity can be detected
Step2: The purpose is to automatically detect leaks, undesired high consumption, etc.. so we can warn the user
Step3: Yes, we do! The most obvious is FL03001579 with a more or less constant leak in the first month and later on some very large leaks during several hours. FL03001556 has a moderate leak once and seems to have similar, but less severe leaks later again. Also in FL03001561 there was once a strange (but rather short) issue and later on small, stubborn and irregularly deteriorating leaks of a different kind.
Step4: So, the big water leaks of FL03001579 are relatively easy to detect, e.g. by raising an alarm as soon as the daily consumption exceeds 1500 l. However, by that time a lot of water has been wasted already. One could lower the threshold a bit, but below 1000l a false alarm would be raised for FL03001525 on one day. Moreover, the other issues are not detected by such an alarm.
Step5: This way, most of the issues could be detected, but some marginally. For small leaks it may take a full day before the alarm is raised.
Step6: The large leaks are very pronounced and easily detected (note that this is a logarithmic scale!) one hour after the leak started. But the smaller leaks are still not visible. (A synthetic sketch of the rolling-minimum idea follows this list.)
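A synthetic sketch of the rolling-minimum idea from Step6, written with the modern pandas rolling API (the flows are made up):

import pandas as pd

# Minutely flow for one day: mostly zero, a 10-minute tap, then a small leak.
flow = pd.Series(0.0, index=pd.date_range('2015-01-01', periods=1440, freq='min'))
flow.iloc[100:110] = 30.0  # normal usage: flow returns to zero afterwards
flow.iloc[700:] = 2.0      # leak: flow never returns to zero

# A 60-minute rolling minimum ignores short bursts of normal usage but
# rises above zero about an hour after a continuous leak starts.
rolling_min = flow.rolling(60).min()
print(rolling_min.max())  # 2.0 -> leak detected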
|
6,295
|
<ASSISTANT_TASK:>
Python Code:
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import numpy as np
from matplotlib import pyplot as plt
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("We are using the following device for learning:",device)
batch_size_train = 60000 # Samples per Training Batch
batch_size_test = 10000 # just create one large test dataset (MNIST test dataset has 10.000 Samples)
# Get Training and Test Dataset with a Dataloader
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./files/', train=True, download=True,
transform=torchvision.transforms.Compose([torchvision.transforms.ToTensor()])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./files/', train=False, download=True,
transform=torchvision.transforms.Compose([torchvision.transforms.ToTensor()])),
batch_size=batch_size_test, shuffle=True)
# We are only interessted in the data and not in the targets
for idx, (data, targets) in enumerate(train_loader):
x_train = data[:,0,:,:]
for idx, (data, targets) in enumerate(test_loader):
x_test = data[:,0,:,:]
image_size = x_train.shape[1]
x_test_flat = torch.reshape(x_test, (x_test.shape[0], image_size*image_size))
plt.figure(figsize=(16,2))
for k in range(8):
plt.subplot(1,8,k+1)
plt.imshow(x_train[np.random.randint(x_train.shape[0])], interpolation='nearest', cmap='binary')
plt.xticks(())
plt.yticks(())
hidden_encoder_1 = 500
hidden_encoder_2 = 250
hidden_encoder_3 = 100
hidden_encoder = [hidden_encoder_1, hidden_encoder_2, hidden_encoder_3]
hidden_decoder_1 = 100
hidden_decoder_2 = 250
hidden_decoder_3 = 500
hidden_decoder = [hidden_decoder_1, hidden_decoder_2, hidden_decoder_3]
class Autoencoder(nn.Module):
def __init__(self, hidden_encoder, hidden_decoder, image_size, bit_per_image):
super(Autoencoder, self).__init__()
# Define Transmitter Layer: Linear function, M input neurons (symbols), 2 output neurons (real and imaginary part)
self.We1 = nn.Linear(image_size*image_size, hidden_encoder[0])
self.We2 = nn.Linear(hidden_encoder[0], hidden_encoder[1])
self.We3 = nn.Linear(hidden_encoder[1], hidden_encoder[2])
self.We4 = nn.Linear(hidden_encoder[2], bit_per_image)
# Define Receiver Layer: Linear function, 2 input neurons (real and imaginary part), M output neurons (symbols)
self.Wd1 = nn.Linear(bit_per_image,hidden_decoder[0])
self.Wd2 = nn.Linear(hidden_decoder[0], hidden_decoder[1])
self.Wd3 = nn.Linear(hidden_decoder[1], hidden_decoder[2])
self.Wd4 = nn.Linear(hidden_decoder[2], image_size*image_size)
# Non-linearity (used in transmitter and receiver)
self.activation_function = nn.ELU()
self.sigmoid = nn.Sigmoid()
self.softsign = nn.Softsign()
def forward(self, training_data, Pe):
encoded = self.encoder(training_data)
        # binarize for the forward pass, using a straight-through estimator:
        # sign() is applied going forward, but the detach() makes the
        # backward pass treat the binarizer as the identity
        ti = encoded.clone()
        compressed = ti + (self.binarizer(ti) - ti).detach()
# add error pattern (flip the bit or not)
error_tensor = torch.distributions.Bernoulli(Pe * torch.ones_like(compressed)).sample()
received = torch.mul( compressed, 1 - 2*error_tensor)
reconstructed = self.decoder(received)
return reconstructed
def test(self, valid_data, Pe):
encoded_test = self.encoder(valid_data)
compressed_test = self.binarizer(encoded_test)
error_tensor_test = torch.distributions.Bernoulli(Pe * torch.ones_like(compressed_test)).sample()
received_test = torch.mul( compressed_test, 1 - 2*error_tensor_test )
reconstructed_test = self.decoder(received_test)
loss_test = torch.mean(torch.square(valid_data - reconstructed_test))
reconstructed_test_noerror = self.decoder(compressed_test)
return reconstructed_test
def encoder(self, batch):
temp = self.activation_function(self.We1(batch))
temp = self.activation_function(self.We2(temp))
temp = self.activation_function(self.We3(temp))
output = self.softsign(self.We4(temp))
return output
def decoder(self, batch):
temp = self.activation_function(self.Wd1(batch))
temp = self.activation_function(self.Wd2(temp))
temp = self.activation_function(self.Wd3(temp))
output = self.sigmoid(self.Wd4(temp))
return output
def binarizer(self, input):
# This is the stochastic quatizer which we use for the training
return torch.sign(input)
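# Illustrative aside (not part of the original notebook): why the
# straight-through trick in forward() keeps gradients alive through sign().
x_demo = torch.tensor([0.3, -0.7], requires_grad=True)
y_demo = x_demo + (torch.sign(x_demo) - x_demo).detach()  # forward: sign(x)
y_demo.sum().backward()
print(x_demo.grad)  # tensor([1., 1.]) -- backward pass sees the identity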
def get_batch(x, batch_size):
idxs = np.random.randint(0, x.shape[0], (batch_size))
return torch.stack([torch.reshape(x[k], (-1,)) for k in idxs])
batch_size = 250
Pe_range = np.array([0, 0.01, 0.1, 0.2])
bit_range = np.array([5, 10, 20, 30, 40, 50, 60, 70, 80, 100])
SNR_result = np.zeros( (len(Pe_range), len(bit_range)) )
# Mean Squared Error loss
loss_fn = nn.MSELoss()
for i in range(len(Pe_range)):
for j in range(len(bit_range)):
best_SNR = -9999;
print('Initializing ....')
model = Autoencoder(hidden_encoder, hidden_decoder, image_size, bit_range[j])
model.to(device)
# Adam Optimizer
optimizer = optim.Adam(model.parameters())
print('Start Training') # Training loop
for it in range(100000): # Original paper does 50k iterations
mini_batch = torch.Tensor(get_batch(x_train, batch_size)).to(device)
# Propagate (training) data through the net
reconstructed = model(mini_batch, Pe_range[i])
# compute loss
loss = loss_fn(mini_batch, reconstructed)
# compute gradients
loss.backward()
# Adapt weights
optimizer.step()
# reset gradients
optimizer.zero_grad()
# Evaulation with the test data
if it % 500 == 0:
reconstructed_test = model.test(x_test_flat.to(device), Pe_range[i])
noise = torch.mean(torch.square(x_test_flat.to(device) - reconstructed_test))
SNR = 10.0 * (torch.log(torch.mean(torch.square(x_test_flat.to(device)))) - torch.log(noise)) / np.log(10.0)
cur_SNR = SNR.detach().cpu().numpy().squeeze()
if cur_SNR > best_SNR:
best_SNR = cur_SNR
if it % 10000 == 0:
print('Pe = %1.2f, bits = %d, It %d: (best SNR: %1.4f dB)' % (Pe_range[i], bit_range[j], it, best_SNR))
SNR_result[i,j] = best_SNR
print('Finished learning for e = %1.2f, bits = %d. Best SNR: %1.4f' % (Pe_range[i], bit_range[j], best_SNR))
print('Training finished')
np.savetxt('SNR_result_simple.txt', SNR_result, delimiter=',')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import and load MNIST dataset (Preprocessing)
Step2: Plot 8 random images
Step3: Specify Autoencoder
Step4: Helper function to get a random mini-batch of images
Step5: Perform the training
|
6,296
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# use seaborn's default plotting styles for matplotlib
import seaborn; seaborn.set()
from gatspy.datasets import fetch_rrlyrae
rrlyrae = fetch_rrlyrae()
lcid = rrlyrae.ids[0]
t, y, dy, filts = rrlyrae.get_lightcurve(lcid)
period = rrlyrae.get_metadata(lcid)['P']
for filt in 'ugriz':
mask = (filts == filt)
plt.errorbar(t[mask] % period, y[mask], dy[mask], fmt='.', label=filt)
plt.gca().invert_yaxis()
plt.legend(ncol=3, loc='upper left');
from gatspy.periodic import LombScargleMultiband
model = LombScargleMultiband(Nterms_base=1, Nterms_band=0)
model.fit(t, y, dy, filts)
periods = np.linspace(period - 0.1, period + 0.1, 2000)
power = model.periodogram(periods)
plt.plot(periods, power, lw=1)
plt.xlim(periods[0], periods[-1]);
def plot_model(model, lcid):
t, y, dy, filts = rrlyrae.get_lightcurve(lcid)
model.fit(t, y, dy, filts)
tfit = np.linspace(0, period, 1000)
for filt in 'ugriz':
mask = (filts == filt)
eb = plt.errorbar(t[mask] % period, y[mask], dy[mask], fmt='.', label=filt)
yfit = model.predict(tfit, filt, period=period)
plt.plot(tfit, yfit, color=eb[0].get_color())
plt.gca().invert_yaxis()
plt.legend(ncol=3, loc='upper left')
plot_model(LombScargleMultiband(Nterms_base=1, Nterms_band=0), lcid)
plot_model(LombScargleMultiband(Nterms_base=4, Nterms_band=1), lcid)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we'll fit the multiband periodogram model to this data. For more information on the model, refer to the VanderPlas and Ivezic paper mentioned above.
Step2: We can see what the multiterm model looks like by plotting it over the data
Step3: If we'd like to do a higher-order multiterm model, we can simply adjust the number of terms in the base and band models
|
6,297
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc("image", aspect="equal")
import SimpleITK as sitk
# Download data to work on
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
img = sitk.GaussianSource(size=[64] * 2)
plt.imshow(sitk.GetArrayViewFromImage(img))
img = sitk.GaborSource(size=[64] * 2, frequency=0.03)
plt.imshow(sitk.GetArrayViewFromImage(img))
def myshow(img):
nda = sitk.GetArrayViewFromImage(img)
plt.imshow(nda)
myshow(img)
img[24, 24]
myshow(img[16:48, :])
myshow(img[:, 16:-16])
myshow(img[:32, :32])
img_corner = img[:32, :32]
myshow(img_corner)
myshow(img_corner[::-1, :])
myshow(
sitk.Tile(
img_corner,
img_corner[::-1, ::],
img_corner[::, ::-1],
img_corner[::-1, ::-1],
[2, 2],
)
)
img = sitk.GaborSource(size=[64] * 3, frequency=0.05)
# Why does this produce an error?
myshow(img)
myshow(img[:, :, 32])
myshow(img[16, :, :])
myshow(img[:, ::3, 32])
img = sitk.ReadImage(fdata("cthead1.png"))
img = sitk.Cast(img, sitk.sitkFloat32)
myshow(img)
img[150, 150]
timg = img**2
myshow(timg)
timg[150, 150]
img = sitk.ReadImage(fdata("cthead1.png"))
myshow(img)
img = sitk.ReadImage(fdata("cthead1.png"))
myshow(img)
myshow(img > 90)
myshow(img > 150)
myshow((img > 90) + (img > 150))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us begin by developing a convenient method for displaying images in our notebooks.
Step2: Multi-dimension slice indexing
Step3: Cropping
Step4: Flipping
Step5: Slice Extraction
Step6: Subsampling
Step7: Mathematical Operators
Step8: Division Operators
Step9: Comparative Operators
Step10: These operators amazingly make common tasks trivial (a tiny numpy analogy follows this list)
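A tiny numpy analogy for the combined thresholding in the last cell (array values are illustrative):

import numpy as np

a = np.array([50, 120, 200])

# Adding two boolean masks yields a small label image with values 0, 1, 2 --
# the same idea as myshow((img > 90) + (img > 150)) above.
labels = (a > 90).astype(int) + (a > 150).astype(int)
print(labels)  # [0 1 2]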
|
6,298
|
<ASSISTANT_TASK:>
Python Code:
# First check the Python version
import sys
if sys.version_info < (3,4):
print('You are running an older version of Python!\n\n' \
'You should consider updating to Python 3.4.0 or ' \
'higher as the libraries built for this course ' \
'have only been tested in Python 3.4 and higher.\n')
print('Try installing the Python 3.5 version of anaconda '
'and then restart `jupyter notebook`:\n' \
'https://www.continuum.io/downloads\n\n')
# Now get necessary libraries
try:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
except ImportError:
print('You are missing some packages! ' \
'We will try installing them before continuing!')
!pip install "numpy>=1.11.0" "matplotlib>=1.5.1" "scikit-image>=0.11.3" "scikit-learn>=0.17"
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
print('Done!')
# Import Tensorflow
try:
import tensorflow as tf
except ImportError:
print("You do not have tensorflow installed!")
print("Follow the instructions on the following link")
print("to install tensorflow before continuing:")
print("")
print("https://github.com/pkmital/CADL#installation-preliminaries")
# This cell includes the provided libraries from the zip file
try:
from libs import utils
except ImportError:
print("Make sure you have started notebook in the same directory" +
" as the provided zip file which includes the 'libs' folder" +
" and the file 'utils.py' inside of it. You will NOT be able"
" to complete this assignment unless you restart jupyter"
" notebook inside the directory created by extracting"
" the zip file or cloning the github repo.")
# We'll tell matplotlib to inline any drawn figures like so:
%matplotlib inline
plt.style.use('ggplot')
# Bit of formatting because inline code is not styled very good by default:
from IPython.core.display import HTML
HTML("""<style> .rendered_html code {
    padding: 2px 4px;
    color: #c7254e;
    background-color: #f9f2f4;
    border-radius: 4px;
} </style>""")
# You need to find 100 images from the web/create them yourself
# or find a dataset that interests you (e.g. I used celeb faces
# in the course lecture...)
# then store them all in a single directory.
# With all the images in a single directory, you can then
# perform the following steps to create a 4-d array of:
# N x H x W x C dimensions as 100 x 100 x 100 x 3.
dirname = ...
# Load every image file in the provided directory
filenames = [os.path.join(dirname, fname)
for fname in os.listdir(dirname)]
# Make sure we have exactly 100 image files!
filenames = filenames[:100]
assert(len(filenames) == 100)
# Read every filename as an RGB image
imgs = [plt.imread(fname)[..., :3] for fname in filenames]
# Crop every image to a square
imgs = [utils.imcrop_tosquare(img_i) for img_i in imgs]
# Then resize the square image to 100 x 100 pixels
imgs = [resize(img_i, (100, 100)) for img_i in imgs]
# Finally make our list of 3-D images a 4-D array with the first dimension the number of images:
imgs = np.array(imgs).astype(np.float32)
# Plot the resulting dataset:
# Make sure you "run" this cell after you create your `imgs` variable as a 4-D array!
# Make sure we have a 100 x 100 x 100 x 3 dimension array
assert(imgs.shape == (100, 100, 100, 3))
plt.figure(figsize=(10, 10))
plt.imshow(utils.montage(imgs, saveto='dataset.png'))
# First create a tensorflow session
sess = ...
# Now create an operation that will calculate the mean of your images
mean_img_op = ...
# And then run that operation using your session
mean_img = sess.run(mean_img_op)
# Then plot the resulting mean image:
# Make sure the mean image is the right size!
assert(mean_img.shape == (100, 100, 3))
plt.figure(figsize=(10, 10))
plt.imshow(mean_img)
plt.imsave(arr=mean_img, fname='mean.png')
# Create a tensorflow operation to give you the standard deviation
# First compute the difference of every image with a
# 4 dimensional mean image shaped 1 x H x W x C
mean_img_4d = ...
subtraction = imgs - mean_img_4d
# Now compute the standard deviation by calculating the
# square root of the expected squared differences
std_img_op = tf.sqrt(tf.reduce_mean(subtraction * subtraction, axis=0))
# Now calculate the standard deviation using your session
std_img = sess.run(std_img_op)
# Then plot the resulting standard deviation image:
# Make sure the std image is the right size!
assert(std_img.shape == (100, 100) or std_img.shape == (100, 100, 3))
plt.figure(figsize=(10, 10))
std_img_show = std_img / np.max(std_img)
plt.imshow(std_img_show)
plt.imsave(arr=std_img_show, fname='std.png')
norm_imgs_op = ...
norm_imgs = sess.run(norm_imgs_op)
print(np.min(norm_imgs), np.max(norm_imgs))
print(imgs.dtype)
# Then plot the resulting normalized dataset montage:
# Make sure we have a 100 x 100 x 100 x 3 dimension array
assert(norm_imgs.shape == (100, 100, 100, 3))
plt.figure(figsize=(10, 10))
plt.imshow(utils.montage(norm_imgs, 'normalized.png'))
norm_imgs_show = (norm_imgs - np.min(norm_imgs)) / (np.max(norm_imgs) - np.min(norm_imgs))
plt.figure(figsize=(10, 10))
plt.imshow(utils.montage(norm_imgs_show, 'normalized.png'))
# First build 3 kernels for each input color channel
ksize = ...
kernel = np.concatenate([utils.gabor(ksize)[:, :, np.newaxis] for i in range(3)], axis=2)
# Now make the kernels into the shape: [ksize, ksize, 3, 1]:
kernel_4d = ...
assert(kernel_4d.shape == (ksize, ksize, 3, 1))
plt.figure(figsize=(5, 5))
plt.imshow(kernel_4d[:, :, 0, 0], cmap='gray')
plt.imsave(arr=kernel_4d[:, :, 0, 0], fname='kernel.png', cmap='gray')
convolved = utils.convolve(...
convolved_show = (convolved - np.min(convolved)) / (np.max(convolved) - np.min(convolved))
print(convolved_show.shape)
plt.figure(figsize=(10, 10))
plt.imshow(utils.montage(convolved_show[..., 0], 'convolved.png'), cmap='gray')
# Create a set of operations using tensorflow which could
# provide you for instance the sum or mean value of every
# image in your dataset:
# First flatten our convolved images so instead of many 3d images,
# we have many 1d vectors.
# This should convert our 4d representation of N x H x W x C to a
# 2d representation of N x (H*W*C)
flattened = tf.reshape(convolved...
assert(flattened.get_shape().as_list() == [100, 10000])
# Now calculate some statistics about each of our images
values = tf.reduce_sum(flattened, axis=1)
# Then create another operation which sorts those values
# and then calculate the result:
idxs_op = tf.nn.top_k(values, k=100)[1]
idxs = sess.run(idxs_op)
# Then finally use the sorted indices to sort your images:
sorted_imgs = np.array([imgs[idx_i] for idx_i in idxs])
# Then plot the resulting sorted dataset montage:
# Make sure we have a 100 x 100 x 100 x 3 dimension array
assert(sorted_imgs.shape == (100, 100, 100, 3))
plt.figure(figsize=(10, 10))
plt.imshow(utils.montage(sorted_imgs, 'sorted.png'))
utils.build_submission('session-1.zip',
('dataset.png',
'mean.png',
'std.png',
'normalized.png',
'kernel.png',
'convolved.png',
'sorted.png',
'session-1.ipynb'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Session 1 - Introduction to Tensorflow
Step2: Place your images in a folder such as dirname = '/Users/Someone/Desktop/ImagesFromTheInternet'. We'll then use the os package to load them and crop/resize them to a standard size of 100 x 100 pixels.
Step3: <a name="part-two---compute-the-mean"></a>
Step4: Once you have seen the mean image of your dataset, how does it relate to your own expectations of the dataset? Did you expect something different? Was there something more "regular" or "predictable" about your dataset that the mean image did or did not reveal? If your mean image looks a lot like something recognizable, it's a good sign that there is a lot of predictability in your dataset. If your mean image looks like nothing at all, a gray blob where not much seems to stand out, then it's pretty likely that there isn't very much in common between your images. Neither is a bad scenario. Though, it is more likely that having some predictability in your mean image, e.g. something recognizable, that there are representations worth exploring with deeper networks capable of representing them. However, we're only using 100 images so it's a very small dataset to begin with.
Step5: Once you have plotted your dataset's standard deviation per pixel, what does it reveal about your dataset? Like with the mean image, you should consider what is predictable and not predictable about this image.
Step6: We apply another type of normalization to 0-1 just for the purposes of plotting the image. If we didn't do this, the range of our values would be somewhere between -1 and 1, and matplotlib would not be able to interpret the entire range of values. By rescaling our -1 to 1 valued images to 0-1, we can visualize it better. (The min-max rescaling is sketched on its own after this list.)
Step7: <a name="part-five---convolve-the-dataset"></a>
Step8: We'll perform the convolution with the 4d tensor in kernel_4d. This is a ksize x ksize x 3 x 1 tensor, where each input color channel corresponds to one filter with 1 output. Each filter looks like the Gabor kernel plotted in the next cell.
Step9: Perform the convolution with the 4d tensors
Step10: What we've just done is build a "hand-crafted" feature detector
Step11: What does your sorting reveal? Could you imagine the same sorting over many more images reveal the thing your dataset sought to represent? It is likely that the representations that you wanted to find hidden within "higher layers", i.e., "deeper features" of the image, and that these "low level" features, edges essentially, are not very good at describing the really interesting aspects of your dataset. In later sessions, we'll see how we can combine the outputs of many more convolution kernels that have been assembled in a way that accentuate something very particular about each image, and build a sorting that is much more intelligent than this one!
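A one-step sketch of the min-max rescaling mentioned in Step6, on its own (the array is random, illustrative data):

import numpy as np

imgs_demo = np.random.randn(4, 8, 8, 3).astype(np.float32)

# Rescale any real-valued range to [0, 1] so matplotlib can display it.
rescaled = (imgs_demo - imgs_demo.min()) / (imgs_demo.max() - imgs_demo.min())
print(rescaled.min(), rescaled.max())  # 0.0 1.0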
|
6,299
|
<ASSISTANT_TASK:>
Python Code:
from google.cloud import bigquery
from datetime import datetime
import matplotlib.pyplot as plt, seaborn as sns
PROJECT_ID = 'yourProject' # Change to your project.
!gcloud config set project $PROJECT_ID
try:
from google.colab import auth
auth.authenticate_user()
print("Colab user is authenticated.")
except: pass
import matplotlib.pyplot as plt, seaborn as sns
%%bigquery --project $PROJECT_ID
CREATE OR REPLACE TABLE recommendations.valid_items
AS
SELECT
item_Id,
COUNT(group_Id) AS item_frequency
FROM recommendations.vw_item_groups
GROUP BY item_Id
HAVING item_frequency >= 15;
SELECT COUNT(*) item_count FROM recommendations.valid_items;
%%bigquery --project $PROJECT_ID
CREATE OR REPLACE TABLE recommendations.valid_groups
AS
SELECT
group_Id,
COUNT(item_Id) AS group_size
FROM recommendations.vw_item_groups
WHERE item_Id IN (SELECT item_Id FROM recommendations.valid_items)
GROUP BY group_Id
HAVING group_size BETWEEN 2 AND 100;
SELECT COUNT(*) group_count FROM recommendations.valid_groups;
%%bigquery --project $PROJECT_ID
SELECT COUNT(*) record_count
FROM `recommendations.vw_item_groups`
WHERE item_Id IN (SELECT item_Id FROM recommendations.valid_items)
AND group_Id IN (SELECT group_Id FROM recommendations.valid_groups);
%%bigquery size_distribution --project $PROJECT_ID
WITH group_sizes
AS
(
SELECT
group_Id,
ML.BUCKETIZE(
COUNT(item_Id), [10, 20, 30, 40, 50, 101])
AS group_size
FROM `recommendations.vw_item_groups`
WHERE item_Id IN (SELECT item_Id FROM recommendations.valid_items)
AND group_Id IN (SELECT group_Id FROM recommendations.valid_groups)
GROUP BY group_Id
)
SELECT
CASE
WHEN group_size = 'bin_1' THEN '[1 - 10]'
WHEN group_size = 'bin_2' THEN '[10 - 20]'
WHEN group_size = 'bin_3' THEN '[20 - 30]'
WHEN group_size = 'bin_4' THEN '[30 - 40]'
WHEN group_size = 'bin_5' THEN '[40 - 50]'
ELSE '[50 - 100]'
END AS group_size,
CASE
WHEN group_size = 'bin_1' THEN 1
WHEN group_size = 'bin_2' THEN 2
WHEN group_size = 'bin_3' THEN 3
WHEN group_size = 'bin_4' THEN 4
WHEN group_size = 'bin_5' THEN 5
ELSE 6
END AS bucket_Id,
COUNT(group_Id) group_count
FROM group_sizes
GROUP BY group_size, bucket_Id
ORDER BY bucket_Id
plt.figure(figsize=(20,5))
q = sns.barplot(x='group_size', y='group_count', data=size_distribution)
%%bigquery occurrence_distribution --project $PROJECT_ID
WITH item_frequency
AS
(
SELECT
Item_Id,
ML.BUCKETIZE(
COUNT(group_Id)
, [15, 30, 50, 100, 200, 300, 400]) AS group_count
FROM `recommendations.vw_item_groups`
WHERE item_Id IN (SELECT item_Id FROM recommendations.valid_items)
AND group_Id IN (SELECT group_Id FROM recommendations.valid_groups)
GROUP BY Item_Id
)
SELECT
CASE
WHEN group_count = 'bin_1' THEN '[15 - 30]'
WHEN group_count = 'bin_2' THEN '[30 - 50]'
WHEN group_count = 'bin_3' THEN '[50 - 100]'
WHEN group_count = 'bin_4' THEN '[100 - 200]'
WHEN group_count = 'bin_5' THEN '[200 - 300]'
WHEN group_count = 'bin_6' THEN '[300 - 400]'
ELSE '[400+]'
END AS group_count,
CASE
WHEN group_count = 'bin_1' THEN 1
WHEN group_count = 'bin_2' THEN 2
WHEN group_count = 'bin_3' THEN 3
WHEN group_count = 'bin_4' THEN 4
WHEN group_count = 'bin_5' THEN 5
WHEN group_count = 'bin_6' THEN 6
ELSE 7
END AS bucket_Id,
COUNT(Item_Id) item_count
FROM item_frequency
GROUP BY group_count, bucket_Id
ORDER BY bucket_Id
plt.figure(figsize=(20, 5))
q = sns.barplot(x='group_count', y='item_count', data=occurrence_distribution)
%%bigquery --project $PROJECT_ID
DROP TABLE IF EXISTS recommendations.valid_items;
%%bigquery --project $PROJECT_ID
DROP TABLE IF EXISTS recommendations.valid_groups;
%%bigquery --project $PROJECT_ID
DECLARE min_item_frequency INT64;
DECLARE max_group_size INT64;
SET min_item_frequency = 15;
SET max_group_size = 100;
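-- Assumed behavior (based on the procedure name): sp_ComputePMI computes item
-- co-occurrence counts and pointwise mutual information,
-- pmi(i, j) = log(P(i, j) / (P(i) * P(j))), for all valid item pairs,
-- writing the results to recommendations.item_cooc.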
CALL recommendations.sp_ComputePMI(min_item_frequency, max_group_size);
%%bigquery --project $PROJECT_ID
SELECT
a.item1_Id,
a.item2_Id,
b.frequency AS freq1,
c.frequency AS freq2,
a.cooc,
a.pmi,
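-- Weighting PMI by raw co-occurrence favors pairs that are both strongly
-- associated and frequently observed together.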
a.cooc * a.pmi AS score
FROM recommendations.item_cooc a
JOIN recommendations.item_frequency b
ON a.item1_Id = b.item_Id
JOIN recommendations.item_frequency c
ON a.item2_Id = c.item_Id
WHERE a.item1_Id != a.item2_Id
ORDER BY score DESC
LIMIT 10;
%%bigquery --project $PROJECT_ID
SELECT COUNT(*) records_count
FROM recommendations.item_cooc
%%bigquery --project $PROJECT_ID
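-- Train the item-matching model; the stored procedure is assumed to wrap a
-- CREATE MODEL statement for matrix factorization with `dimensions` latent factors.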
DECLARE dimensions INT64 DEFAULT 50;
CALL recommendations.sp_TrainItemMatchingModel(dimensions)
%%bigquery song_embeddings --project $PROJECT_ID
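-- ML.WEIGHTS returns the learned factor weights. In this item-to-item setup an
-- item Id can appear on both sides of the factorization, which is why the
-- Python code below sums the weights per item.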
SELECT
feature,
processed_input,
factor_weights,
intercept
FROM
ML.WEIGHTS(MODEL recommendations.item_matching_model)
WHERE
feature IN ('2114406',
'2114402',
'2120788',
'2120786',
'1086322',
'3129954',
'53448',
'887688',
'562487',
'833391',
'1098069',
'910683',
'1579481',
'2675403',
'2954929',
'625169')
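# Human-readable titles for the item Ids queried above (display only).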
songs = {
'2114406': 'Metallica: Nothing Else Matters',
'2114402': 'Metallica: The Unforgiven',
'2120788': 'Limp Bizkit: My Way',
'2120786': 'Limp Bizkit: My Generation',
'1086322': 'Jacques Brel: Ne Me Quitte Pas',
'3129954': 'Édith Piaf: Non, Je Ne Regrette Rien',
'53448': 'France Gall: Ella, Elle l\'a',
'887688': 'Enrique Iglesias: Tired Of Being Sorry',
'562487': 'Shakira: Hips Don\'t Lie',
'833391': 'Ricky Martin: Livin\' la Vida Loca',
'1098069': 'Snoop Dogg: Drop It Like It\'s Hot',
'910683': '2Pac: California Love',
'1579481': 'Dr. Dre: The Next Episode',
'2675403': 'Eminem: Lose Yourself',
'2954929': 'Black Sabbath: Iron Man',
'625169': 'Black Sabbath: Paranoid',
}
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
def process_results(results):
    items = list(results['feature'].unique())
    item_embeddings = dict()
    for item in items:
        # Buffer sized generously; only the first `dimensions` factors (50 here)
        # are populated, and trailing zeros do not affect cosine similarity.
        embedding = [0.0] * 100
        embedding_pair = results[results['feature'] == item]
        for _, row in embedding_pair.iterrows():
            factor_weights = list(row['factor_weights'])
            for element in factor_weights:
                # Factors are 1-indexed; sum weights from both sides of the model.
                embedding[element['factor'] - 1] += element['weight']
        item_embeddings[item] = embedding
    return item_embeddings
item_embeddings = process_results(song_embeddings)
item_ids = list(item_embeddings.keys())
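# For each song, rank every other song by cosine similarity of its embedding.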
for idx1 in range(0, len(item_ids) - 1):
    item1_Id = item_ids[idx1]
    title1 = songs[item1_Id]
    print(title1)
    print("==================")
    embedding1 = np.array(item_embeddings[item1_Id])
    similar_items = []
    for idx2 in range(len(item_ids)):
        item2_Id = item_ids[idx2]
        title2 = songs[item2_Id]
        embedding2 = np.array(item_embeddings[item2_Id])
        similarity = round(cosine_similarity([embedding1], [embedding2])[0][0], 5)
        similar_items.append((title2, similarity))
    similar_items = sorted(similar_items, key=lambda item: item[1], reverse=True)
    # Skip the first entry, which is the song compared with itself (similarity 1.0).
    for element in similar_items[1:]:
        print(f"- {element[0]} = {element[1]}")
    print()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configure GCP environment settings
Step2: Authenticate your GCP account
Step3: Explore the sample data
Step4: Count the number of songs that occur in at least 15 groups
Step5: Count the number of playlists that have between 2 and 100 items
Step6: Count the number of records with valid songs and playlists
Step7: Show the playlist size distribution
Step8: Show the song occurrence distribution
Step9: Compute song PMI data
Step10: View the song PMI data
Step11: Train the BigQuery ML matrix factorization model
Step12: Explore the trained embeddings
|