| Unnamed: 0 (int64, 0–15.9k) | cleaned_code (string, 67–124k chars, nullable) | cleaned_prompt (string, 168–30.3k chars, nullable) |
|---|---|---|
7,200
|
<ASSISTANT_TASK:>
Python Code:
%reset -sf
import logging
import os
import torch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
import pyro.optim as optim
pyro.set_rng_seed(1)
assert pyro.__version__.startswith('1.7.0')
%matplotlib inline
plt.style.use('default')
logging.basicConfig(format='%(message)s', level=logging.INFO)
smoke_test = ('CI' in os.environ)
pyro.set_rng_seed(1)
DATA_URL = "https://d2hg8soec8ck9v.cloudfront.net/datasets/rugged_data.csv"
rugged_data = pd.read_csv(DATA_URL, encoding="ISO-8859-1")
def model(is_cont_africa, ruggedness, log_gdp):
a = pyro.sample("a", dist.Normal(0., 10.))
b_a = pyro.sample("bA", dist.Normal(0., 1.))
b_r = pyro.sample("bR", dist.Normal(0., 1.))
b_ar = pyro.sample("bAR", dist.Normal(0., 1.))
sigma = pyro.sample("sigma", dist.Uniform(0., 10.))
mean = a + b_a * is_cont_africa + b_r * ruggedness + b_ar * is_cont_africa * ruggedness
with pyro.plate("data", len(ruggedness)):
pyro.sample("obs", dist.Normal(mean, sigma), obs=log_gdp)
def guide(is_cont_africa, ruggedness, log_gdp):
a_loc = pyro.param('a_loc', torch.tensor(0.))
a_scale = pyro.param('a_scale', torch.tensor(1.),
constraint=constraints.positive)
sigma_loc = pyro.param('sigma_loc', torch.tensor(1.),
constraint=constraints.positive)
weights_loc = pyro.param('weights_loc', torch.randn(3))
weights_scale = pyro.param('weights_scale', torch.ones(3),
constraint=constraints.positive)
a = pyro.sample("a", dist.Normal(a_loc, a_scale))
b_a = pyro.sample("bA", dist.Normal(weights_loc[0], weights_scale[0]))
b_r = pyro.sample("bR", dist.Normal(weights_loc[1], weights_scale[1]))
b_ar = pyro.sample("bAR", dist.Normal(weights_loc[2], weights_scale[2]))
sigma = pyro.sample("sigma", dist.Normal(sigma_loc, torch.tensor(0.05)))
mean = a + b_a * is_cont_africa + b_r * ruggedness + b_ar * is_cont_africa * ruggedness
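# Note: `mean` computed above is never used inside the guide; for SVI only the
# pyro.sample statements (whose names match the model's latent sites) matter.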
# Utility function to print latent sites' quantile information.
def summary(samples):
site_stats = {}
for site_name, values in samples.items():
marginal_site = pd.DataFrame(values)
describe = marginal_site.describe(percentiles=[.05, 0.25, 0.5, 0.75, 0.95]).transpose()
site_stats[site_name] = describe[["mean", "std", "5%", "25%", "50%", "75%", "95%"]]
return site_stats
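# Illustrative use (a sketch): summary({"a": np.random.randn(1000)}) returns
# {"a": <one-row DataFrame>} holding the mean, std and 5/25/50/75/95% quantiles of the draws.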
# Prepare training data
df = rugged_data[["cont_africa", "rugged", "rgdppc_2000"]]
df = df[np.isfinite(df.rgdppc_2000)]
df["rgdppc_2000"] = np.log(df["rgdppc_2000"])
train = torch.tensor(df.values, dtype=torch.float)
from pyro.infer import SVI, Trace_ELBO
svi = SVI(model,
guide,
optim.Adam({"lr": .05}),
loss=Trace_ELBO())
is_cont_africa, ruggedness, log_gdp = train[:, 0], train[:, 1], train[:, 2]
pyro.clear_param_store()
num_iters = 5000 if not smoke_test else 2
for i in range(num_iters):
elbo = svi.step(is_cont_africa, ruggedness, log_gdp)
if i % 500 == 0:
logging.info("Elbo loss: {}".format(elbo))
from pyro.infer import Predictive
num_samples = 1000
predictive = Predictive(model, guide=guide, num_samples=num_samples)
svi_samples = {k: v.reshape(num_samples).detach().cpu().numpy()
for k, v in predictive(is_cont_africa, ruggedness, log_gdp).items()
if k != "obs"}
for site, values in summary(svi_samples).items():
print("Site: {}".format(site))
print(values, "\n")
from pyro.infer import MCMC, NUTS
nuts_kernel = NUTS(model)
mcmc = MCMC(nuts_kernel, num_samples=1000, warmup_steps=200)
mcmc.run(is_cont_africa, ruggedness, log_gdp)
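# NUTS/HMC draws asymptotically exact posterior samples, so they serve below as a
# reference against which the SVI approximations are compared.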
hmc_samples = {k: v.detach().cpu().numpy() for k, v in mcmc.get_samples().items()}
for site, values in summary(hmc_samples).items():
print("Site: {}".format(site))
print(values, "\n")
sites = ["a", "bA", "bR", "bAR", "sigma"]
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(12, 10))
fig.suptitle("Marginal Posterior density - Regression Coefficients", fontsize=16)
for i, ax in enumerate(axs.reshape(-1)):
site = sites[i]
sns.distplot(svi_samples[site], ax=ax, label="SVI (DiagNormal)")
sns.distplot(hmc_samples[site], ax=ax, label="HMC")
ax.set_title(site)
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right');
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
fig.suptitle("Cross-section of the Posterior Distribution", fontsize=16)
sns.kdeplot(hmc_samples["bA"], hmc_samples["bR"], ax=axs[0], shade=True, label="HMC")
sns.kdeplot(svi_samples["bA"], svi_samples["bR"], ax=axs[0], label="SVI (DiagNormal)")
axs[0].set(xlabel="bA", ylabel="bR", xlim=(-2.5, -1.2), ylim=(-0.5, 0.1))
sns.kdeplot(hmc_samples["bR"], hmc_samples["bAR"], ax=axs[1], shade=True, label="HMC")
sns.kdeplot(svi_samples["bR"], svi_samples["bAR"], ax=axs[1], label="SVI (DiagNormal)")
axs[1].set(xlabel="bR", ylabel="bAR", xlim=(-0.45, 0.05), ylim=(-0.15, 0.8))
handles, labels = axs[1].get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right');
from pyro.infer.autoguide import AutoMultivariateNormal, init_to_mean
guide = AutoMultivariateNormal(model, init_loc_fn=init_to_mean)
svi = SVI(model,
guide,
optim.Adam({"lr": .01}),
loss=Trace_ELBO())
is_cont_africa, ruggedness, log_gdp = train[:, 0], train[:, 1], train[:, 2]
pyro.clear_param_store()
for i in range(num_iters):
elbo = svi.step(is_cont_africa, ruggedness, log_gdp)
if i % 500 == 0:
logging.info("Elbo loss: {}".format(elbo))
predictive = Predictive(model, guide=guide, num_samples=num_samples)
svi_mvn_samples = {k: v.reshape(num_samples).detach().cpu().numpy()
for k, v in predictive(is_cont_africa, ruggedness, log_gdp).items()
if k != "obs"}
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(12, 10))
fig.suptitle("Marginal Posterior density - Regression Coefficients", fontsize=16)
for i, ax in enumerate(axs.reshape(-1)):
site = sites[i]
sns.distplot(svi_mvn_samples[site], ax=ax, label="SVI (Multivariate Normal)")
sns.distplot(hmc_samples[site], ax=ax, label="HMC")
ax.set_title(site)
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right');
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
fig.suptitle("Cross-sections of the Posterior Distribution", fontsize=16)
sns.kdeplot(svi_samples["bA"], svi_samples["bR"], ax=axs[0], label="SVI (Diagonal Normal)")
sns.kdeplot(svi_mvn_samples["bA"], svi_mvn_samples["bR"], ax=axs[0], shade=True, label="SVI (Multivariate Normal)")
axs[0].set(xlabel="bA", ylabel="bR", xlim=(-2.5, -1.2), ylim=(-0.5, 0.1))
sns.kdeplot(svi_samples["bR"], svi_samples["bAR"], ax=axs[1], label="SVI (Diagonal Normal)")
sns.kdeplot(svi_mvn_samples["bR"], svi_mvn_samples["bAR"], ax=axs[1], shade=True, label="SVI (Multivariate Normal)")
axs[1].set(xlabel="bR", ylabel="bAR", xlim=(-0.45, 0.05), ylim=(-0.15, 0.8))
handles, labels = axs[1].get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right');
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
fig.suptitle("Cross-sections of the Posterior Distribution", fontsize=16)
sns.kdeplot(hmc_samples["bA"], hmc_samples["bR"], ax=axs[0], shade=True, label="HMC")
sns.kdeplot(svi_mvn_samples["bA"], svi_mvn_samples["bR"], ax=axs[0], label="SVI (Multivariate Normal)")
axs[0].set(xlabel="bA", ylabel="bR", xlim=(-2.5, -1.2), ylim=(-0.5, 0.1))
sns.kdeplot(hmc_samples["bR"], hmc_samples["bAR"], ax=axs[1], shade=True, label="HMC")
sns.kdeplot(svi_mvn_samples["bR"], svi_mvn_samples["bAR"], ax=axs[1], label="SVI (Multivariate Normal)")
axs[1].set(xlabel="bR", ylabel="bAR", xlim=(-0.45, 0.05), ylim=(-0.15, 0.8))
handles, labels = axs[1].get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bayesian Linear Regression
Step2: SVI
Step3: Let us observe the posterior distribution over the different latent variables in the model.
Step4: HMC
Step5: Comparing Posterior Distributions
Step6: MultivariateNormal Guide
Step7: Let's look at the shape of the posteriors again. You can see the multivariate guide is able to capture more of the true posterior.
Step8: Now let's compare the posterior computed by the Diagonal Normal guide vs the Multivariate Normal guide. Note that the multivariate distribution is more dispersed than the Diagonal Normal.
Step9: Finally, we compare the Multivariate guide with the posterior computed by HMC. Note that the Multivariate guide better captures the true posterior.
|
7,201
|
<ASSISTANT_TASK:>
Python Code:
import openturns as ot
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
random_state = 123
np.random.seed(random_state)
from depimpact.tests import func_sum
help(func_sum)
dim = 2
margins = [ot.Normal()]*dim
families = np.zeros((dim, dim), dtype=int)
families[1, 0] = 1
from depimpact import ConservativeEstimate
quant_estimate = ConservativeEstimate(model_func=func_sum, margins=margins, families=families)
n = 1000
indep_result = quant_estimate.independence(n_input_sample=n, random_state=random_state)
from depimpact import quantile_func
alpha = 0.05
q_func = quantile_func(alpha)
indep_result.q_func = q_func
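# Conceptually (a sketch, not necessarily the depimpact implementation), quantile_func(alpha)
# returns a closure over alpha, e.g. something like
#   lambda sample, axis=0: np.percentile(sample, alpha * 100, axis=axis)
# i.e. the empirical alpha-quantile of a sample of model outputs.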
sns.jointplot(indep_result.input_sample[:, 0], indep_result.input_sample[:, 1]);
h = sns.distplot(indep_result.output_sample_id, axlabel='Model output', label="Output Distribution")
plt.plot([indep_result.quantity]*2, h.get_ylim(), label='Quantile at %d%%' % (alpha*100))
plt.legend(loc=0)
print('Output quantile :', indep_result.quantity)
indep_result.compute_bootstrap(n_bootstrap=5000)
sns.distplot(indep_result.bootstrap_sample, axlabel='Output quantile');
ci = [0.025, 0.975]
quantity_ci = indep_result.compute_quantity_bootstrap_ci(ci)
h = sns.distplot(indep_result.output_sample_id, axlabel='Model output', label="Output Distribution")
plt.plot([indep_result.quantity]*2, h.get_ylim(), 'g-', label='Quantile at %d%%' % (alpha*100))
plt.plot([quantity_ci[0]]*2, h.get_ylim(), 'g--', label='%d%% confidence intervals' % ((1. - (ci[0] + 1. - ci[1]))*100))
plt.plot([quantity_ci[1]]*2, h.get_ylim(), 'g--')
plt.legend(loc=0)
print('Quantile at independence: %.2f with a C.O.V at %.1f %%' % (indep_result.boot_mean, indep_result.boot_cov))
K = 20
n = 10000
grid_type = 'lhs'
dep_measure = 'parameter'
grid_result = quant_estimate.gridsearch(n_dep_param=K, n_input_sample=n, grid_type=grid_type, dep_measure=dep_measure,
random_state=random_state)
print('The computation did %d model evaluations.' % (grid_result.n_evals))
grid_result.q_func = q_func
min_result = grid_result.min_result
print('Minimum quantile: {} at param: {}'.format(min_result.quantity, min_result.dep_param))
plt.plot(grid_result.dep_params, grid_result.quantities, '.', label='Quantiles')
plt.plot(min_result.dep_param[0], min_result.quantity, 'ro', label='minimum')
plt.xlabel('Dependence parameter')
plt.ylabel('Quantile value')
plt.legend(loc=0);
grid_result.compute_bootstraps(n_bootstrap=500)
boot_min_quantiles = grid_result.bootstrap_samples.min(axis=0)
boot_argmin_quantiles = grid_result.bootstrap_samples.argmin(axis=0).ravel().tolist()
boot_min_params = [grid_result.dep_params[idx][0] for idx in boot_argmin_quantiles]
fig, axes = plt.subplots(1, 2, figsize=(14, 5))
sns.distplot(boot_min_quantiles, axlabel="Minimum quantiles", ax=axes[0])
sns.distplot(boot_min_params, axlabel="Parameters of the minimum", ax=axes[1])
# The parameter with most occurence
boot_id_min = max(set(boot_argmin_quantiles), key=boot_argmin_quantiles.count)
boot_min_result = grid_result[boot_id_min]
boot_mean = boot_min_result.bootstrap_sample.mean()
boot_std = boot_min_result.bootstrap_sample.std()
print('Worst Quantile: {} at {} with a C.O.V of {} %'.format(boot_min_result.boot_mean, min_result.dep_param, boot_min_result.boot_cov*100.))
plt.plot(grid_result.kendalls, grid_result.quantities, '.', label='Quantiles')
plt.plot(min_result.kendall_tau, min_result.quantity, 'ro', label='Minimum quantile')
plt.xlabel("Kendall's tau")
plt.ylabel('Quantile')
plt.legend(loc=0);
bounds_tau = np.asarray([[0., 0.7], [0.1, 0.]])
quant_estimate.bounds_tau = bounds_tau
K = 20
n = 10000
grid_type = 'lhs'
grid_result = quant_estimate.gridsearch(n_dep_param=K, n_input_sample=n, grid_type=grid_type, random_state=random_state)
grid_result.q_func = q_func
min_result = grid_result.min_result
print('Minimum quantile: {} at param: {}'.format(min_result.quantity, min_result.dep_param))
plt.plot(grid_result.dep_params, grid_result.quantities, '.', label='Quantiles')
plt.plot(min_result.dep_param[0], min_result.quantity, 'ro', label='minimum')
plt.xlabel('Dependence parameter')
plt.ylabel('Quantile value')
plt.legend(loc=0);
filename = './result.hdf'
grid_result.to_hdf(filename)
from depimpact import ListDependenceResult
load_grid_result = ListDependenceResult.from_hdf(filename, q_func=q_func, with_input_sample=False)
np.testing.assert_array_equal(grid_result.output_samples, load_grid_result.output_samples)
import os
os.remove(filename)
K = None
n = 1000
grid_type = 'vertices'
grid_result = quant_estimate.gridsearch(n_dep_param=K, n_input_sample=n, grid_type=grid_type, random_state=random_state)
grid_result.q_func = q_func
print("Kendall's Tau : {}, Quantile: {}".format(grid_result.kendalls.ravel(), grid_result.quantities))
from depimpact.plots import matrix_plot_input
matrix_plot_input(grid_result.min_result);
dim = 5
quant_estimate.margins = [ot.Normal()]*dim
families = np.zeros((dim, dim), dtype=int)
families[2, 0] = 1
quant_estimate.families = families
families
quant_estimate.bounds_tau = None
quant_estimate.bounds_tau
quant_estimate.vine_structure
K = 20
n = 10000
grid_type = 'vertices'
grid_result = quant_estimate.gridsearch(n_dep_param=K, n_input_sample=n, grid_type=grid_type, random_state=random_state)
grid_result.q_func = q_func
min_result = grid_result.min_result
print('Worst Quantile: {} at {}'.format(min_result.quantity, min_result.dep_param))
matrix_plot_input(min_result)
plt.plot(grid_result.dep_params, grid_result.quantities, '.', label='Quantiles')
plt.plot(min_result.dep_param[0], min_result.quantity, 'ro', label='Minimum')
plt.xlabel('Dependence parameter')
plt.ylabel('Quantile value')
plt.legend(loc=0);
families = np.zeros((dim, dim), dtype=int)
for i in range(1, dim):
for j in range(i):
families[i, j] = 1
quant_estimate.margins = margins
quant_estimate.families = families
quant_estimate.vine_structure = None
quant_estimate.bounds_tau = None
quant_estimate.bounds_tau
K = 100
n = 1000
grid_type = 'lhs'
grid_result = quant_estimate.gridsearch(n_dep_param=K, n_input_sample=n, grid_type=grid_type, random_state=random_state)
min_result = grid_result.min_result
print('Worst Quantile: {0} at {1}'.format(min_result.quantity, min_result.dep_param))
families[3, 2] = 0
quant_estimate = ConservativeEstimate(model_func=func_sum, margins=margins, families=families)
K = 100
n = 10000
grid_type = 'lhs'
grid_result = quant_estimate.gridsearch(n_dep_param=K, n_input_sample=n, grid_type=grid_type,
q_func=q_func, random_state=random_state)
min_result = grid_result.min_result
print('Worst Quantile: {0} at {1}'.format(min_result.quantity, min_result.dep_param))
grid_result.vine_structure
from depimpact.plots import matrix_plot_input
matrix_plot_input(min_result)
K = 100
n = 1000
grid_type = 'lhs'
grid_result_1 = quant_estimate.gridsearch(n_dep_param=K, n_input_sample=n, grid_type=grid_type, save_grid=True, grid_path='./output')
grid_result_2 = quant_estimate.gridsearch(n_dep_param=K, n_input_sample=n, grid_type=grid_type,
q_func=q_func, use_grid=0, grid_path='./output')
grid_result_1.n_input_sample, grid_result_2.n_input_sample
grid_result = grid_result_1 + grid_result_2
grid_result.n_input_sample
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Additive model
Step2: Dimension 2
Step3: Copula families
Step4: Estimations
Step5: First, we compute the quantile at independence
Step6: We aim to minimize the output quantile. To do that, we create a q_func object from the function quantile_func to associate a probability $\alpha$ to a function that computes the empirical quantile from a given sample.
Step7: The computation returns a DependenceResult instance. This object gathers the information from the computation. It also computes the output quantity of interest (which can also be changed).
Step8: A bootstrap can be done on the output quantity
Step9: And we can plot it
Step10: Grid Search Approach
Step11: The computation returns a ListDependenceResult which is a list of DependenceResult instances and some bonuses.
Step12: Let's set the quantity function and search for the minimum among the grid results.
Step13: We can plot the grid results. The figure below shows the output quantiles as a function of the dependence parameters.
Step14: As with the individual problem, we can also bootstrap each parameter. Because we have $K$ parameters, we bootstrap the $K$ samples, compute the $K$ quantiles for every bootstrap replicate, and take the minimum quantile for each replicate.
Step15: For the parameter that occurs most often at the minimum, we compute its bootstrap mean.
Step16: Kendall's Tau
Step17: As we can see, the bounds
Step18: Saving the results
Step19: Taking the extreme values of the dependence parameter
Step20: Higher Dimension
Step21: Copula families with one dependent pair
Step22: We reset the families and bounds for the current instance. (I don't want to create a new instance, just to check if the setters are good).
Step23: Let's do the grid search to see
Step24: The quantile is lower compared to the problem of dimension 1. Indeed, there are more variables and thus more uncertainty, so a larger deviation of the output.
Step25: Copula families with all dependent pairs
Step26: With one fixed pair
Step27: Save the used grid and load it again
Step28: Then gather the results from the same grid with the same configurations
Step29: Because the configurations are the same, we can gather the results from two different runs
|
7,202
|
<ASSISTANT_TASK:>
Python Code:
%%timeit
maths = list()
for x in range(10):
maths.append(x**x)
%%timeit
maths = [x**x for x in range(10)]
# maths
import matplotlib.pyplot as plt
import math
import numpy as np
%matplotlib inline
t = np.arange(0., 5., 0.2)
plt.plot(t, t, 'r--', t, t**2, 'bs')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can make pretty graphs
|
7,203
|
<ASSISTANT_TASK:>
Python Code:
# Import spaCy and load the language library
import spacy
nlp = spacy.load('en_core_web_sm')
# Create a string that includes opening and closing quotation marks
mystring = '"We\'re moving to L.A.!"'
print(mystring)
# Create a Doc object and explore tokens
doc = nlp(mystring)
for token in doc:
print(token.text, end=' | ')
doc2 = nlp(u"We're here to help! Send snail-mail, email support@oursite.com or visit us at http://www.oursite.com!")
for t in doc2:
print(t)
doc3 = nlp(u'A 5km NYC cab ride costs $10.30')
for t in doc3:
print(t)
doc4 = nlp(u"Let's visit St. Louis in the U.S. next year.")
for t in doc4:
print(t)
len(doc)
len(doc.vocab)
doc5 = nlp(u'It is better to give than to receive.')
# Retrieve the third token:
doc5[2]
# Retrieve three tokens from the middle:
doc5[2:5]
# Retrieve the last four tokens:
doc5[-4:]
doc6 = nlp(u'My dinner was horrible.')
doc7 = nlp(u'Your dinner was delicious.')
# Try to change "My dinner was horrible" to "My dinner was delicious"
doc6[3] = doc7[3]
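# The assignment above raises a TypeError: spaCy Doc objects do not support
# item assignment, so individual tokens cannot be replaced in place.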
doc8 = nlp(u'Apple to build a Hong Kong factory for $6 million')
for token in doc8:
print(token.text, end=' | ')
print('\n----')
for ent in doc8.ents:
print(ent.text+' - '+ent.label_+' - '+str(spacy.explain(ent.label_)))
len(doc8.ents)
doc9 = nlp(u"Autonomous cars shift insurance liability toward manufacturers.")
for chunk in doc9.noun_chunks:
print(chunk.text)
doc10 = nlp(u"Red cars do not carry higher insurance rates.")
for chunk in doc10.noun_chunks:
print(chunk.text)
doc11 = nlp(u"He was a one-eyed, one-horned, flying, purple people-eater.")
for chunk in doc11.noun_chunks:
print(chunk.text)
from spacy import displacy
doc = nlp(u'Apple is going to build a U.K. factory for $6 million.')
displacy.render(doc, style='dep', jupyter=True, options={'distance': 110})
doc = nlp(u'Over the last quarter Apple sold nearly 20 thousand iPods for a profit of $6 million.')
displacy.render(doc, style='ent', jupyter=True)
doc = nlp(u'This is a sentence.')
displacy.serve(doc, style='dep')
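# Note: displacy.serve starts a local web server (port 5000 by default) and blocks
# until interrupted; inside Jupyter, displacy.render (as above) is usually preferable.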
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <img src="../tokenization.png" width="600">
Step2: <font color=green>Note that the exclamation points, comma, and the hyphen in 'snail-mail' are assigned their own tokens, yet both the email address and website are preserved.</font>
Step3: <font color=green>Here the distance unit and dollar sign are assigned their own tokens, yet the dollar amount is preserved.</font>
Step4: <font color=green>Here the abbreviations for "Saint" and "United States" are both preserved.</font>
Step5: Counting Vocab Entries
Step6: <font color=green>NOTE
Step7: Tokens cannot be reassigned
Step8: Named Entities
Step9: <font color=green>Note how two tokens combine to form the entity Hong Kong, and three tokens combine to form the monetary entity
Step10: Named Entity Recognition (NER) is an important machine learning tool applied to Natural Language Processing.<br>We'll do a lot more with it in an upcoming section. For more info on named entities visit https
Step11: We'll look at additional noun_chunks components besides .text in an upcoming section.<br>For more info on noun_chunks visit https
Step12: The optional 'distance' argument sets the distance between tokens. If the distance is made too small, text that appears beneath short arrows may become too compressed to read.
Step13: Creating Visualizations Outside of Jupyter
|
7,204
|
<ASSISTANT_TASK:>
Python Code:
# 1 Read dataset
cols = [
'clump thickness',
'uniformity of cell size',
'uniformity of cell shape',
'marginal adhesion',
'single epithelial cell size',
'bare nuclei',
'bland chromatin',
'normal nucleoli',
'mitoses',
'class']
df = pd.read_csv('breast-cancer-wisconsin.data',index_col=0,header=None)
df.index.name = 'id number'
df.columns=cols
df.head()
# Change class labels to 0 and 1 for simplicity
df['class']=df['class'].apply(lambda x: 0 if x == 2 else 1 )
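# In the original Wisconsin data the class column uses 2 for benign and 4 for malignant;
# the lambda above maps these to 0 (benign) and 1 (malignant).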
# Is there missing data or NaNs?
df_size = len(df)
df_null = df.isnull().values.any()
#print("Data frame size {}, missing data in {}".format(df_size,len(df_null)))
df_null
#No null values
# See if there are strange values in the dataset:
def visualize_unique_values(df):
# Loop over each column
print('Column','Unique_values')
for icol in range(len(cols)):
# Select a column
df_col = df.iloc[:, icol]
# Check unique values
unique_values = df_col.unique()
print(cols[icol],unique_values)
visualize_unique_values(df)
bare_nuclei = df['bare nuclei']
# 1 get data frame with all non missing data:
df2 = bare_nuclei.loc[bare_nuclei != '?']
print(len(df2),len(df))
# Get the mode value
col_mode = int(df2.mode().values[0])
print("Mode :", col_mode)
#Verify:
bare_nuclei.value_counts()
# Convert data to mode:
df2 = bare_nuclei.apply(lambda x: col_mode if x == '?' else int(x) )
#Check it worked:
print(df2.unique())
# Replace dataset column with clean data
df['bare nuclei'] = df2
# Check this actually worked
visualize_unique_values(df)
y = df['class']
X = df.copy()
del X['class']
class1 = y[y == 0]
class2 = y[y == 1]
print("Class balance\n Class 0: {}\n Class 1: {}\n Ratio: {}".format(len(class1),len(class2),len(class1)/len(class2)))
%matplotlib inline
import seaborn as sns
sns.pairplot(df, hue="class")
#df.head(1)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=2, stratify= y )
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
model = RandomForestClassifier()
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test,y_pred))
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
# Max number of features by default is sqrt(n_features), which is good to keep to prevent from overfitting.
#If “auto”, then max_features=sqrt(n_features).
rfc = RandomForestClassifier()
params = {'criterion': ['gini','entropy'],'n_estimators': range(10, 50, 10)}
searcher = RandomizedSearchCV(rfc, params, n_jobs=-1, n_iter=4,scoring='f1')
searcher.fit(X_train, y_train) #assuming X and y are your data
import numpy as np
# Utility function to report best scores
def report(results, n_top=1):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
report(searcher.cv_results_)
model = RandomForestClassifier(n_estimators=30, max_depth = None)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test,y_pred))
from sklearn.metrics import f1_score
n_steps=10
step = int(len(X_train)/n_steps)
results=[]
for ix in range(n_steps):
size_train = step + ix * step
model.fit(X_train[:size_train],y_train[:size_train])
y_pred = model.predict(X_test)
score = f1_score(y_test,y_pred)
results.append([size_train,score])
results = np.array(results)
import matplotlib.pyplot as plt
plt.plot(results[:,0],results[:,1])
plt.xlabel('Training set size')
plt.ylabel('F1 score')
plt.title('Learning curve')
plt.grid(True)
from sklearn.metrics import confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
class_names=[0,1]
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
from scipy import interp
# #############################################################################
# Data IO and generation
# Import some data to play with
X = X_train
y = y_train
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
#random_state = np.random.RandomState(0)
#X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# #############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = RandomForestClassifier(n_estimators=30, random_state = 0)
#svm.SVC(kernel='linear', probability=True,
# random_state=random_state)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 0
for train, test in cv.split(X, y):
probas_ = classifier.fit(X.iloc[train], y.iloc[train]).predict_proba(X.iloc[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y.iloc[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Confusion matrix
from sklearn.metrics import confusion_matrix
y_true = [2, 0, 2, 2, 0, 1]
y_pred = [0, 0, 2, 2, 0, 2]
confusion_matrix(y_true, y_pred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Clean data
Step2: There is no missing data in the dataset.
Step3: Warning.
Step4: Note that 402 rows have the mode value of '1'.
Step5: Model
Step6: Take home message
Step7: Take home messages
Step8: As said before, I use a random forest classifier since this works best for unbalanced classes
Step9: WOW, the model performed great with default parameters.
Step11: The model barely improves its accuracy.
Step12: The learning curve is oscillating by 0.02
|
7,205
|
<ASSISTANT_TASK:>
Python Code:
import osmdigest.digest as digest
import os
#filename = os.path.join("//media", "disk", "OSM_Data", "isle-of-wight-latest.osm.xz")
filename = os.path.join("..", "..", "..", "Data", "isle-of-wight-latest.osm.xz")
building_node_ids = []
addr_node_ids = []
for x in digest.parse(filename):
if isinstance(x, digest.Node):
if "building" in x.tags:
building_node_ids.append(x)
if any(key.startswith("addr:") for key in x.tags):
addr_node_ids.append(x)
len(building_node_ids), building_node_ids[:5]
len(addr_node_ids), addr_node_ids[:5]
building_way_ids = []
addr_way_ids = []
for x in digest.parse(filename):
if isinstance(x, digest.Way):
if "building" in x.tags:
building_way_ids.append(x)
if any(key.startswith("addr:") for key in x.tags):
addr_way_ids.append(x)
len(building_way_ids), building_way_ids[:5]
len(addr_way_ids), addr_way_ids[:5]
building_rel_ids = []
addr_rel_ids = []
for x in digest.parse(filename):
if isinstance(x, digest.Relation):
if "building" in x.tags:
building_rel_ids.append(x)
if any(key.startswith("addr:") for key in x.tags):
addr_rel_ids.append(x)
len(building_rel_ids), building_rel_ids[:5]
len(addr_rel_ids), addr_rel_ids[:5]
import numpy as np
import pandas as pd
gen = digest.parse(filename)
print(next(gen))
print(next(gen))
possible_address_tags = set()
for x in gen:
for key in x.tags:
if key.startswith("addr:"):
possible_address_tags.add(key)
possible_address_tags
gen = digest.parse(filename)
osm = next(gen)
bounds = next(gen)
address_data = { key : [] for key in possible_address_tags }
address_data["osm_id"] = []
for x in gen:
addr = {key : x.tags[key] for key in x.tags if key.startswith("addr:")}
if len(addr) > 0:
address_data["osm_id"].append(x.name+"/"+str(x.osm_id))
for key in possible_address_tags:
if key in addr:
address_data[key].append(addr[key])
else:
address_data[key].append(np.nan)
data = pd.DataFrame(address_data)
data = data.set_index("osm_id")
data[:5]
import osmdigest.sqlite as sq
import os
filename = os.path.join("//tmp", "aaa", "illinois-latest.db")
#filename = os.path.join("..", "..", "..", "Data", "illinois-latest.db")
db = sq.OSM_SQLite(filename)
def iterate_over_tags(iterator):
buildings, addresses = [], []
for element in iterator:
if any(key.startswith("building") for key in element.tags):
buildings.append(element)
if any(key.startswith("addr") for key in element.tags):
addresses.append(element)
return buildings, addresses
building_nodes, address_nodes = iterate_over_tags(db.nodes())
len(building_nodes), building_nodes[:5]
len(address_nodes), address_nodes[:5]
building_ways, address_ways = iterate_over_tags(db.ways())
len(building_ways), building_ways[:5]
len(address_ways), address_ways[:5]
building_rels, address_rels = iterate_over_tags(db.relations())
len(building_rels), building_rels[:5]
len(address_rels), address_rels[:5]
features = []
def make_feature(el, centroid):
return { "properties": {
"street": el.tags["addr:street"],
"housenumber": el.tags["addr:housenumber"],
"osm_id": "{}/{}".format(el.name, el.osm_id)
},
"geometry": { "type": "Point",
"coordinates": centroid } }
for el in db.search_node_tag_keys({"addr:street", "addr:housenumber"}):
features.append(make_feature(el, [el.longitude, el.latitude]))
for el in db.search_way_tag_keys({"addr:street", "addr:housenumber"}):
way = db.complete_way(el)
features.append(make_feature(el, way.centroid()))
for el in db.search_relation_tag_keys({"addr:street", "addr:housenumber"}):
rel = db.complete_relation(el)
features.append(make_feature(el, rel.centroid()))
import geopandas as gpd
frame = gpd.GeoDataFrame.from_features(features)
#frame = frame.set_geometry("centroid")
frame[:5]
unexpected_addresses = frame[~ frame.housenumber.map(lambda x : all(y>='0' and y<='9' for y in x))]
unexpected_addresses.head()
unexpected_addresses[5:10]
import re
one = re.compile("^\\d+\\s+#\\d+$")
assert one.match("4262 #12") is not None
assert one.match("4 62 #12") is None
two = re.compile("^\\d+[NSEW]\\d+$")
assert two.match("19N479") is not None
assert two.match("19NS479") is None
assert two.match("19 479") is None
three = re.compile("^\\d+\\s*[a-zA-Z]\\.*$")
assert three.match("152A") is not None
assert three.match("152 A") is not None
assert three.match("152 A.") is not None
assert three.match("152Ac") is None
four = re.compile("^\\d+\\s*1/2")
matches = {one, two, three, four}
left = unexpected_addresses[~ unexpected_addresses.housenumber.map(lambda x : any(m.match(x) is not None for m in matches))]
left.head()
for way in db.search_way_tag_keys({"addr:interpolation"}):
print(way)
import fiona
fiona.supported_drivers
frame.to_file("illinois_building.json", driver="GeoJSON")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Nodes tagged as buildings / with addresses
Step2: Same for ways
Step3: Finally for relations
Step4: Process to a pandas dataframe
Step5: Do the same for USA / Chicago data
Step6: Nodes
Step7: Ways
Step8: Relations
Step9: Process into a useful form
Step10: Explore inconsistencies
Step11: The addresses with "#" in them appear to be neighbours in a "trailer park", judging from satellite images. This addressing scheme follows how apartments/flats are addressed.
Step12: So let's filter out things which match. (Stand-back, I know regular expressions).
Step13: This leaves a lot left over.
Step14: Finally, save the data out. We can use any supported fiona driver. Here I use GeoJSON, as it's human readable, and no less space efficient than a Shapefile. A Shapefile can be imported into QGis etc., of course.
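For instance, writing an ESRI Shapefile instead is a one-line change (a sketch; geopandas falls back to the Shapefile driver when none is given):
frame.to_file("illinois_building.shp")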
|
7,206
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
from sympy.abc import n, i, N, x, lamda, phi, z, j, r, k, a, alpha
from commons import *
from matrix_functions import *
from sequences import *
import functions_catalog
init_printing()
m=8
C = define(let=Symbol(r'\mathcal{{C}}_{{ {} }}'.format(m)),
be=Matrix(m, m, lambda n,k: (k+1)*binomial(2*n-k, n-k)/(n+1) if n > 0 else int(not k)))
C
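# Sanity check (illustrative): for k = 0 the entry reduces to binomial(2n, n)/(n + 1),
# the n-th Catalan number, so the first column of C_8 reads 1, 1, 2, 5, 14, 42, 132, 429.
assert [C.rhs[n, 0] for n in range(m)] == [1, 1, 2, 5, 14, 42, 132, 429]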
eigendata = spectrum(C)
eigendata
data, eigenvals, multiplicities = eigendata.rhs
Phi_poly = Phi_poly_ctor(deg=m-1)
Phi_poly
Phi_polynomials = component_polynomials(eigendata, early_eigenvals_subs=True)
Phi_polynomials
cmatrices = component_matrices(C, Phi_polynomials)
list(cmatrices.values())
f_power, g_power, G_power = functions_catalog.power(eigendata, Phi_polynomials)
C_power = G_power(C)
C_power
define(C_power.lhs, C_power.rhs.applyfunc(factor)) # factored
assert C_power.rhs == (C.rhs**r).applyfunc(simplify)
inspect(C_power.rhs)
production_matrix(C_power.rhs).applyfunc(factor)
f_inverse, g_inverse, G_inverse = functions_catalog.inverse(eigendata, Phi_polynomials)
C_inverse = G_inverse(C)
C_inverse, G_inverse(C_inverse)
inspect(C_inverse.rhs)
production_matrix(C_inverse.rhs)
assert C_inverse.rhs*C.rhs == Matrix(m, m, identity_matrix())
assert C_inverse.rhs == C_power.rhs.subs({r:-1})
f_sqrt, g_sqrt, G_sqrt = functions_catalog.square_root(eigendata, Phi_polynomials)
C_sqrt = G_sqrt(C)
C_sqrt
inspect(C_sqrt.rhs)
production_matrix(C_sqrt.rhs)
assert C.rhs**(S(1)/2) == C_sqrt.rhs
assert C_sqrt.rhs == C_power.rhs.subs({r:S(1)/2})
f_exp, g_exp, G_exp = functions_catalog.exp(eigendata, Phi_polynomials)
C_exp = G_exp(C)
C_exp
define(C_exp.lhs, C_exp.rhs.applyfunc(factor))
C_exp1 = define(let=Subs(C_exp.lhs, alpha, 1), be=C_exp.rhs.subs({alpha:1}))
C_exp1
inspect(C_exp.rhs)
inspect(C_exp1.rhs)
eigendata_Cexpt = spectrum(C_exp1)
eigendata_Cexpt
Phi_polynomials_Cexpt = component_polynomials(eigendata_Cexpt, early_eigenvals_subs=True)
Phi_polynomials_Cexpt
f_log, g_log, G_log = functions_catalog.log(eigendata, Phi_polynomials)
C_log = G_log(C)
C_log
inspect(C_log.rhs[1:,:-1])
production_matrix(C_log.rhs[1:,:-1])
g_log_Cexpt = Hermite_interpolation_polynomial(f_log, eigendata_Cexpt, Phi_polynomials_Cexpt)
g_log_Cexpt
g_log_Cexpt = g_log_Cexpt.subs(eigendata_Cexpt.rhs[1])
g_log_Cexpt
with lift_to_matrix_function(g_log_Cexpt) as G_log_Cexpt:
CC = G_log_Cexpt(C_exp1)
CC
f_sin, g_sin, G_sin = functions_catalog.sin(eigendata, Phi_polynomials)
C_sin = G_sin(C)
C_sin
inspect(C_sin.rhs) # takes long to evaluate
f_cos, g_cos, G_cos = functions_catalog.cos(eigendata, Phi_polynomials)
C_cos = G_cos(C)
C_cos
inspect(C_sin.rhs) # takes long to evaluate
assert (C_sin.rhs**2 + C_cos.rhs**2).applyfunc(trigsimp) == Matrix(m,m, identity_matrix())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Catalan array $\mathcal{C}$
Step2: power function
Step3: inverse function
Step4: sqrt function
Step5: expt function
Step6: log function
Step7: sin function
Step8: cos function
|
7,207
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from prophet import Prophet
df = pd.read_csv('../examples/example_wp_log_peyton_manning.csv')
df.head()
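# Prophet expects exactly this layout: a column `ds` with dates (YYYY-MM-DD) and a
# column `y` with the numeric value to forecast (here the log of daily page views).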
m = Prophet()
m.fit(df)
future = m.make_future_dataframe(periods=365)
future.tail()
forecast = m.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
fig1 = m.plot(forecast)
fig2 = m.plot_components(forecast)
from prophet.plot import plot_plotly, plot_components_plotly
plot_plotly(m, forecast)
plot_components_plotly(m, forecast)
%%R
library(prophet)
%%R
df <- read.csv('../examples/example_wp_log_peyton_manning.csv')
%%R
m <- prophet(df)
%%R
future <- make_future_dataframe(m, periods = 365)
tail(future)
%%R
forecast <- predict(m, future)
tail(forecast[c('ds', 'yhat', 'yhat_lower', 'yhat_upper')])
%%R -w 10 -h 6 -u in
plot(m, forecast)
%%R -w 9 -h 9 -u in
prophet_plot_components(m, forecast)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We fit the model by instantiating a new Prophet object. Any settings to the forecasting procedure are passed into the constructor. Then you call its fit method and pass in the historical dataframe. Fitting should take 1-5 seconds.
Step2: Predictions are then made on a dataframe with a column ds containing the dates for which a prediction is to be made. You can get a suitable dataframe that extends into the future a specified number of days using the helper method Prophet.make_future_dataframe. By default it will also include the dates from the history, so we will see the model fit as well.
Step3: The predict method will assign each row in future a predicted value which it names yhat. If you pass in historical dates, it will provide an in-sample fit. The forecast object here is a new dataframe that includes a column yhat with the forecast, as well as columns for components and uncertainty intervals.
Step4: You can plot the forecast by calling the Prophet.plot method and passing in your forecast dataframe.
Step5: If you want to see the forecast components, you can use the Prophet.plot_components method. By default you'll see the trend, yearly seasonality, and weekly seasonality of the time series. If you include holidays, you'll see those here, too.
Step6: An interactive figure of the forecast and components can be created with plotly. You will need to install plotly 4.0 or above separately, as it will not by default be installed with prophet. You will also need to install the notebook and ipywidgets packages.
Step7: More details about the options available for each method are available in the docstrings, for example, via help(Prophet) or help(Prophet.fit). The R reference manual on CRAN provides a concise list of all of the available functions, each of which has a Python equivalent.
Step8: First we read in the data and create the outcome variable. As in the Python API, this is a dataframe with columns ds and y, containing the date and numeric value respectively. The ds column should be YYYY-MM-DD for a date, or YYYY-MM-DD HH
Step9: We call the prophet function to fit the model. The first argument is the historical dataframe. Additional arguments control how Prophet fits the data and are described in later pages of this documentation.
Step10: Predictions are made on a dataframe with a column ds containing the dates for which predictions are to be made. The make_future_dataframe function takes the model object and a number of periods to forecast and produces a suitable dataframe. By default it will also include the historical dates so we can evaluate in-sample fit.
Step11: As with most modeling procedures in R, we use the generic predict function to get our forecast. The forecast object is a dataframe with a column yhat containing the forecast. It has additional columns for uncertainty intervals and seasonal components.
Step12: You can use the generic plot function to plot the forecast, by passing in the model and the forecast dataframe.
Step13: You can use the prophet_plot_components function to see the forecast broken down into trend, weekly seasonality, and yearly seasonality.
|
7,208
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy.random as rng
import pylab as pl
import autograd.numpy as np
from autograd import grad
def sigmoid(phi):
return 1.0/(1.0 + np.exp(-phi))
def calc_outputs(params):
# Sigmoid perceptron ('logistic regression')
XX = X - params['m']
phi = np.dot(XX, params['w'])
psi = np.dot(XX, params['u'])
job = sigmoid(psi)
out = sigmoid(job*phi + (1-job)*params['b'])
return job, out
def classification_loss(params):
job,Y = calc_outputs(params)
return np.sum(Targ*np.log(Y) + (1-Targ)*np.log(1-Y))
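# Note: this "loss" is the Bernoulli log-likelihood of the targets, so it is maximised;
# the training step below therefore adds (rather than subtracts) the gradient.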
### We use autograd, delivers gradient of the loss
classification_gradient = grad(classification_loss)
def init_params(Nins, weights_scale=1.0):
m = 2.0*rng.normal(0,1,size=(Nins)) # a point in the input space, through which decision plane passes
w = weights_scale*rng.normal(0,1,size=(Nins)) # direction of normal to a decision hyperplane
u = weights_scale*rng.normal(0,1,size=(Nins)) # direction of normal to a decision hyperplane
b = .25+.5*rng.random() # number between 0 and 1 = prob of Targ=1 in the gate=0 region
params = {'m':m, 'w':w, 'u':u, 'b':b}
return(params)
def do_plots(params):
m,u,w,b = params['m'],truth_params['u'],truth_params['w'],truth_params['b']
gate, out = calc_outputs(params)
cl ={0:'red', 1:'black'}
pl.subplot(131)
for i, x in enumerate(X):
pl.plot(x[0],x[1],'.',color=cl[int(Targ[i])],alpha=.5,markersize=12)
pl.axis('off'); pl.axis('equal'); pl.title('truth')
pl.subplot(132)
for i, x in enumerate(X):
pl.plot(x[0],x[1],'.',color='blue',alpha=.1+.8*gate[i], markersize=12)
pl.axis('off'); pl.axis('equal'); pl.title('my job')
pl.axis('off')
pl.axis('equal')
pl.subplot(133)
for i, x in enumerate(X):
pl.plot(x[0],x[1],'.',color='black',alpha=.5*out[i],markersize=12)
pl.plot(x[0],x[1],'.',color='red', alpha=.5*(1-out[i]),markersize=12)
pl.axis('off'); pl.axis('equal'); pl.title('out')
Npats, Nins = 300, 2
X = 2*rng.normal(0,1,size=(Npats,Nins)) # the data points
truth_params = init_params(Nins, 4.)
job, out = calc_outputs(truth_params)
Targ = 1.*(rng.random(out.shape) < out)
do_plots(truth_params)
def do_one_learning_step(params,X,Targ,rate): # one step of learning
grads = classification_gradient(params)
for s in params.keys():
change = rate * grads[s]
params[s] = params[s] + change
params[s] = params[s] * .999
params['b'] = params['b'] * .95 # "weight decay" on b encourages mediocrity in NotMyJob
return (params)
#params = truth_params.copy() #to start from truth - just a test obviously!
params = init_params(Nins, 0.5) # to start from random point in parameter space
L = []
rate = 0.002
steps = 1000.
T = np.arange(steps)
for t in T:
params = do_one_learning_step(params,X,Targ,rate)
L.append(classification_loss(params))
pl.plot(T,L)
Lopt = classification_loss(truth_params)
print(Lopt)
pl.plot([0,np.max(T)],[Lopt,Lopt],'-g')
do_plots(params)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make up an 'on-model' dataset
Step2: Learning, starting from random weights and bias.
|
7,209
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os, subprocess, mplleaflet, re, json
import xml.etree.ElementTree as ET
import pandas as pd
import geopandas as gpd
import seaborn as sns
from pymongo import MongoClient
from pprint import pprint
from collections import defaultdict
from shapely.geometry import shape
print "File Size .... {:.2f} MB".format(os.stat("./data/sydney_australia.osm").st_size / 1024.0 ** 2)
# convert xml to geojson
cmd_convert = [
'node', '--max_old_space_size=4096', './node_modules/osmtogeojson/osmtogeojson', './data/sydney_australia.osm'
]
geojson = subprocess.check_output(cmd_convert, shell=True)
# extract features
with open('./data/sydney_australia.geojson', 'r+') as f:
geojson = json.loads(geojson)
json.dump(geojson['features'], f)
# reset mongodb
cmd_resetDB = ['mongo', 'osm', '--eval', 'db.dropDatabase()']
subprocess.call(cmd_resetDB)
# import geojson to mongodb
cmd_import = [
'mongoimport', '--db', 'osm', '--collection', 'sydney', '--file',
'./data/sydney_australia.geojson', '--jsonArray'
]
subprocess.call(cmd_import)
client = MongoClient()
db = client.osm
sydney = db.sydney
pprint(sydney.find_one(
{ 'id': { '$regex': 'way*' } },
{ 'geometry': 0 } # hide geometry for better display.
))
number_of_documents = sydney.find({}).count()
print number_of_documents
number_of_nodes = sydney.find({'id': {'$regex': 'node*'} }).count()
print number_of_nodes
number_of_ways = sydney.find({ 'id': {'$regex': 'way*'} }).count()
print number_of_ways
number_of_relation = sydney.find({ 'id': {'$regex': 'relation*'} }).count()
print number_of_relation
# verify those three covers all of the document
number_of_documents == number_of_nodes + number_of_ways + number_of_relation
len(list(sydney.aggregate([
{'$group': {
'_id': '$properties.uid',
'count': { '$sum': 1 }
}
}
])))
def entries_with_unexpected_postcode():
postcodes = sydney.find(
{'$and': [
{'properties.addr:postcode': {'$not': re.compile('^2[0-5][0-9][0-9]$') }}, # 2000—2599
{'properties.addr:postcode': {'$not': re.compile('^2[6-8][2-9][0-9]$') }}, # 2620—2899
{'properties.addr:postcode': {'$not': re.compile('^29[2-9][1-9]$') }}, # 2921—2999
{'properties.addr:postcode': {'$exists': 1}}
]},
{ 'properties.addr:postcode': 1 }
)
return list(postcodes)
pprint(entries_with_unexpected_postcode()[0:5])
with_nsw_prefix = sydney.find(
{'properties.addr:postcode': re.compile('NSW') }, # 2000—2599
{ 'properties.addr:postcode': 1 }
)
pattern = re.compile('^NSW ([0-9]{4})$')
for entry in with_nsw_prefix:
postcode = entry['properties']['addr:postcode']
result = sydney.update_one(
{'_id': entry['_id'] },
{'$set': {'properties.addr:postcode': pattern.search(postcode).group(1)} }
)
rest_of_invalid = entries_with_unexpected_postcode()
pprint(rest_of_invalid)
for entries in entries_with_unexpected_postcode():
sydney.update_one(
{ '_id': entries['_id'] },
{ '$unset': {'properties.addr:postcode': ''} }
)
invalid_street_number = list(sydney.find(
{'$and': [
{'properties.addr:housenumber': { '$not': re.compile('[0-9]+(-[0-9]+)?') } },
{'properties.addr:housenumber': { '$exists': 1 } }
]},
{'properties.addr:housenumber': 1 }
))
pprint(invalid_street_number)
for entry in invalid_street_number:
sydney.update_one(
{ '_id': entry['_id'] },
{ '$unset': { 'properties.addr:housenumber': '' } }
)
pprint(sydney.find_one({'id': {'$regex': 'way*'}, 'properties.highway': {'$exists': 1} }, {'geometry': 0}))
roads = list(sydney.aggregate([
{'$match': {
'id': { '$regex': 'way*' },
'properties.highway': { '$exists': 1 }
}
},
{'$group': {
'_id': '$properties.highway',
'count': { '$sum': 1 }
}
}
]))
pprint(roads)
road_types = map(lambda x: x['_id'], roads)
# this is a list I pulled manually from OSM document on map feature
osm_highway_types = set([
# way
"motorway", "trunk", "primary", "secondary", "tertiary", "unclassified", "residential", "service",
"motorway_link", "trunk_link", "primary_link", "secondary_link", "tertiary_link", "living_street", "pedestrian",
"track", "bus_guideway", "escape", "raceway", "road", "footway", "bridleway", "steps", "path", "cycleway", "crossing"
# area
"rest_area", "services",
# lifecycle
"proposed", "construction"
])
set(road_types) - osm_highway_types # non-standard highway types
corridors = gpd.GeoDataFrame(list(sydney.find({'properties.highway': "corridor"})))
corridors['geometry'] = corridors['geometry'].apply(lambda x: shape(x))
corridors
gpd.GeoDataFrame(corridors.iloc[0]).transpose().plot(color='red', linewidth='5')
mplleaflet.display()
corridors[1:9].plot(color='red', linewidth='5')
mplleaflet.display()
gpd.GeoDataFrame(corridors.ix[10]).transpose().plot(color='red', linewidth='5')
mplleaflet.display()
x = sydney.update_many(
{'properties.highway': 'corridor'},
{'$set': { "properties.highway": "footpath" } },
)
pprint(list(sydney.find({'properties.highway': 'corridor'}))) #double checking
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, let's check the size of our data file.
Step2: I will convert the XML file into GeoJSON format, then import it into a mongo database. It should be much easier to do various data wrangling tasks inside mongo, as we can leverage its advanced tools.
Step3: 3. A Quick Peek into the Database
Step4: First, let's take a look at the data in its GeoJSON format.
Step5: The id field contains the top-level XML tag's name and its id, separated by a /.
Step6: Number of nodes
Step7: Number of ways
Step8: Number of relations
Step9: Number of Contributors
Step10: 4. Data Audit and Cleaning
Step11: One of the most common inconsistent patterns is having 'NSW' prefixed to the postcode. So let's update those to keep only the 4-digit numbers.
Step12: Let's run entries_with_unexpected_postcode again to see what other inconsistent or invalid patterns are in the database
Step13: The rest of the postcodes are just invalid entries. Let's remove those postcodes from our database.
Step14: Problem #2
Step15: I will fix it by dropping those addr
Step16: Problem #3
Step17: With this list of road types that appear in the dataset, I then compared it against a list of standard highway types I pulled manually from the OSM documentation, and printed out the highway types that are not listed there.
Step18: The above set includes all of the non-standard highway types. Let's take a closer look at the first one, Corridor.
Step19: Only the first and last of those have names, "Epping Club Walk" and "Epping Station Concourse". The other 9 way tags were created by Leon K, and among those, 8 were created at the same time.
Step20: Looks like this is a footpath inside a building
Step21: Those are actually the corridors inside the airport terminal.
Step22: Looks like this is also a footpath inside a building.
|
7,210
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import fetch_mldata
from sklearn.utils import shuffle
mnist = fetch_mldata('MNIST original', data_home='./mnist_data')
X, y = shuffle(mnist.data[:60000], mnist.target[:60000])
X_small = X[:100]
y_small = y[:100]
# Note: using only 10% of the training data
X_large = X[:6000]
y_large = y[:6000]
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
rfc = RandomForestClassifier(n_jobs=-1)
param_distributions = {
'max_features': uniform(0.6, 0.4),
'n_estimators': range(20, 401, 20),
'max_depth': list(range(5, 41, 5)) + [None],
'min_samples_split': [0.01, 0.05, 0.1]
}
n_iter = 100
search = RandomizedSearchCV(estimator=rfc, param_distributions=param_distributions, n_iter=n_iter, n_jobs=-1, verbose=3)
%time search.fit(X_small, y_small)
print(search.best_score_, search.best_params_)
project_id = 'YOUR-PROJECT-ID'
bucket_name = 'YOUR-BUCKET-NAME'
cluster_id = 'YOUR-CLUSTER-ID'
image_name = 'YOUR-IMAGE-NAME'
zone = 'us-central1-b'
source_dir = 'source'
from helpers.cloudbuild_helper import build
build(project_id, source_dir, bucket_name, image_name)
from helpers.gke_helper import create_cluster
create_cluster(project_id, zone, cluster_id, n_nodes=1, machine_type='n1-standard-64')
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
rfc = RandomForestClassifier(n_jobs=-1)
param_distributions = {
'max_features': uniform(0.6, 0.4),
'n_estimators': range(20, 401, 20),
'max_depth': list(range(5, 41, 5)) + [None],
'min_samples_split': [0.01, 0.05, 0.1]
}
n_iter = 100
search = RandomizedSearchCV(estimator=rfc, param_distributions=param_distributions, n_iter=n_iter, n_jobs=-1, verbose=3)
from gke_parallel import GKEParallel
gke_search = GKEParallel(search, project_id, zone, cluster_id, bucket_name, image_name)
! bash get_cluster_credentials.sh $cluster_id $zone
gke_search.fit(X_large, y_large)
gke_search.param_distributions
gke_search.task_name
gke_search.job_names
#gke_search.cancel()
gke_search.done(), gke_search.dones
result = gke_search.result(download=False)
from helpers.kubernetes_helper import get_pod_logs
for pod_name, log in get_pod_logs().items():
print('=' * 20)
print('\t{}\n'.format(pod_name))
print(log)
from helpers.gke_helper import delete_cluster
#delete_cluster(project_id, zone, cluster_id)
import time
from helpers.gke_helper import delete_cluster
while not gke_search.done():
n_done = len([d for d in gke_search.dones.values() if d])
print('{}/{} finished'.format(n_done, len(gke_search.job_names)))
time.sleep(60)
delete_cluster(project_id, zone, cluster_id)
result = gke_search.result(download=True)
from helpers.gcs_helper import download_uri_and_unpickle
gcs_uri = 'gs://YOUR-BUCKET-NAME/YOUR-CLUSTER-ID.YOUR-IMAGE-NAME.UNIX-TIME/gke_search.pkl'
gke_search_restored = download_uri_and_unpickle(gcs_uri)
gke_search.best_score_, gke_search.best_params_, gke_search.best_estimator_
predicted = gke_search.predict(mnist.data[60000:])
print(len([p for i, p in enumerate(predicted) if p == mnist.target[60000:][i]]))
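# Expressed as a fraction of the 10,000 held-out digits (a sketch; assumes `predicted` is a numpy array):
print((predicted == mnist.target[60000:]).mean())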
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Instantiate the estimator and the SearchCV objects
Step2: Fit the GridSearchCV object locally
Step3: Everything up to this point is what you would do when training locally. With larger amount of data it would take much longer.
Step4: A Google Cloud Storage bucket belonging to your project created through either
Step5: Pick a cluster id for the cluster on Google Container Engine we will create. Preferably not an existing cluster to avoid affecting its workload.
Step6: Choose a name for the image that will be running on the container.
Step7: Choose a zone to host the cluster.
Step8: Change this only if you have customized the source.
Step9: Build a Docker image
Step10: Create a cluster
Step11: For GCE instance pricing
Step12: Refresh access token to the cluster
Step13: Deploy the fitting task
Step14: Inspect the GKEParallel object
Step15: You could optionally specify a task_name when creating a GKEParallel instance (a short sketch appears at the end of this description).
Step16: Similarly, each job is given a job_name. The dictionary of job_names can be accessed as follows. Each worker pod handles one job processing one of the smaller param_grids.
Step17: Cancel the task
Step18: Monitor the progress
Step19: When all the jobs are finished, the pods will stop running (but the cluster will remain), and we can retrieve the fitted model.
Step20: You can also get the logs from the pods
Step21: Once the jobs are finished, the cluster can be deleted. All the fitted models are stored on GCS.
Step22: The next cell continues to poll the jobs until they are all finished, downloads the results, and deletes the cluster.
Step23: Restore the GKEParallel object
Step24: Inspect the result
Step25: You can also call predict(), which delegates the call to the best_estimator_.
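A quick way to score those delegated predictions is sklearn's accuracy_score; a minimal sketch, assuming the mnist arrays and the fitted gke_search from the cells above:
from sklearn.metrics import accuracy_score
test_predictions = gke_search.predict(mnist.data[60000:])
print(accuracy_score(mnist.target[60000:], test_predictions))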
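As mentioned in Step15, a task name can be supplied when constructing GKEParallel. A hedged sketch follows; the keyword name task_name is inferred from the wording above and from the task_name attribute inspected earlier, so verify it against your copy of gke_parallel:
gke_search_named = GKEParallel(search, project_id, zone, cluster_id,
                               bucket_name, image_name, task_name='mnist-rf-search')
print(gke_search_named.task_name)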
|
7,211
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from numpy import genfromtxt
from matplotlib.font_manager import FontProperties
from pylab import rcParams
fontP = FontProperties()
fontP.set_size('small')
def loadData(filename):
return genfromtxt(filename, delimiter=' ')
def plotData(data):
rcParams['figure.figsize'] = 10, 6 # Set figure size to 10" wide x 6" tall
t = data[:,0]
altitude = data[:,1]
verticalspeed = data[:,2]
acceleration = data[:,3] # magnitude of acceleration
gforce = data[:,4]
throttle = data[:,5] * 100
dthrottle_p = data[:,6] * 100
dthrottle_d = data[:,7] * 100
# Top subplot, position and velocity up to threshold
plt.subplot(3, 1, 1)
plt.plot(t, altitude, t, verticalspeed, t, acceleration)
plt.axhline(y=100,linewidth=1,alpha=0.5,color='r',linestyle='--',label='goal');
plt.text(max(t)*0.9, 105, 'Goal Altitude', fontsize=8);
plt.title('Craft Altitude over Time')
plt.ylabel('Altitude (meters)')
    plt.legend(['Altitude (m)', 'Vertical Speed (m/s)', 'Acceleration (m/s^2)'], loc='best', prop=fontP, frameon=False)
# Middle subplot, throttle & dthrottle
plt.subplot(3, 1, 2)
plt.plot(t, throttle, t, dthrottle_p, t, dthrottle_d)
    plt.legend(['Throttle%', 'P', 'D'], loc='best', prop=fontP, frameon=False)  # Small font, best location
plt.ylabel('Throttle %')
# Bottom subplot, gforce
plt.subplot(3, 1, 3)
plt.plot(t, gforce)
plt.axhline(y=1,linewidth=1,alpha=0.5,color='r',linestyle='--',label='goal');
plt.text(max(t)*0.9, 1.2, 'Goal g-force', fontsize=8);
    plt.legend(['gforce'], loc='best', bbox_to_anchor=(1.0, 1.0), prop=fontP, frameon=False)  # Small font, best location
plt.xlabel('Time (seconds)')
plt.ylabel('G-force');
plt.show();
data = loadData('collected_data\\gforce.txt')
plotData(data)
data = loadData('collected_data\\vspeed.txt')
plotData(data)
# To run in kOS console: RUN hover3(pos0.txt,20,0.05,0).
data = loadData('collected_data\\pos0.txt')
plotData(data)
# To run in kOS console: RUN hover3(pos1.txt,20,0.05,0).
data = loadData('collected_data\\pos1.txt')
plotData(data)
# To run in kOS console: RUN hover3(pos2.txt,20,0.08,0.04).
data = loadData('collected_data\\pos2.txt')
plotData(data)
# To run in kOS console: RUN hover4(hover0.txt,60,0.01,0.001).
data = loadData('collected_data\\hover0.txt')
plotData(data)
# To run in kOS console: RUN hover4(hover1.txt,60,0.01,0.01).
data = loadData('collected_data\\hover1.txt')
plotData(data)
# To run in kOS console: RUN hover4(hover2.txt,10,0.1,0.1).
data = loadData('collected_data\\hover2.txt')
plotData(data)
# To run in kOS console: RUN hover5(hover3.txt,10,0.1,0.1,300).
data = loadData('collected_data\\hover3.txt')
plotData(data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: G-force Control
Step2: Pretty cool! Once it passes 100m altitude the controller starts, the throttle controls for gforce, bringing it oscillating down around 1g. This zeros our acceleration but not our existing velocity, so the position continues to increase. We could add some derivative gain to damp down the gforce overshoot, but it won't solve this problem yet.
Step3: Awesome! The controller drops the velocity to a stable oscillation around 0 m/s, and the position seems to flatten off, but it isn't perfect. Maybe it's because of the oscillations? In the game I can see the engine spurt on and off rhythmically. It seems to try and stay at roughly 0 m/s, but the position is not 100m and it drifts.
Step4: Well, we crashed.
Step5: Great! The controller burned us up to about 100m and then tried to stay there, but there is quite a lot of bounce; maybe we can tweak our gains some.
Step6: Hmm, after trying a few other combinations, it seems like there's a conceptual error here keeping us from getting to a smooth point.
Step7: It's stably oscillating! This is a good sign, showing our hover setpoint is doing its job, the proportional gain is there, and there's barely any derivative gain (a minimal Python sketch of this PD loop appears at the end of this description).
Step8: Woohoo! It overshoots a little but stabilizes smoothly at 100m! Great to see this going in the game, looks a bit like the SpaceX grasshopper.
Step9: Much faster! What happens if we change the altitude to say 300m?
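The PD loop referenced in Step7 fits in a few lines of ordinary Python; a minimal sketch of the idea, with illustrative gain values and variable names rather than the actual kOS script:
def throttle_update(target_alt, altitude, vertical_speed, kp=0.1, kd=0.1):
    # the proportional term pushes toward the altitude setpoint,
    # the derivative term (on vertical speed) damps the overshoot
    error = target_alt - altitude
    throttle = kp * error - kd * vertical_speed
    return min(max(throttle, 0.0), 1.0)  # clamp to the 0-1 throttle range
print(throttle_update(100.0, 80.0, 5.0))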
|
7,212
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries for numerical computation and dataframe manipulation
import numpy as np
import pandas as pd
# Import a library that provides access to resources via URL.
# import urllib # for Python 2
import urllib.request # for Python 3
# Import libraries for drawing figures and graphs.
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.colors import LinearSegmentedColormap
from sklearn.decomposition import PCA # principal component analysis
# Specify the resource on the web
url = 'https://raw.githubusercontent.com/chemo-wakate/tutorial-6th/master/beginner/data/winequality-red.txt'
# Download the resource from the specified URL and give it a name.
# urllib.urlretrieve(url, 'winequality-red.csv') # for Python 2
urllib.request.urlretrieve(url, 'winequality-red.txt') # for Python 3
# Load the data
df1 = pd.read_csv('winequality-red.txt', sep='\t', index_col=0)
df1 # check the contents
df1.T # .T transposes the matrix
# Import libraries for drawing figures and graphs.
import matplotlib.pyplot as plt
%matplotlib inline
df1['fixed acidity'].hist()
df1['fixed acidity'].hist(figsize=(5, 5), bins=20) # increase the number of bins
# All columns can also be displayed at once
df1.hist(figsize=(20, 20), bins=20)
plt.show()
df1.plot(kind='scatter', x=u'pH', y=u'alcohol', grid=True)
df1.plot(kind='scatter', x=u'pH', y=u'alcohol', \
c=df1['quality'], cmap='coolwarm', grid=True)
plt.scatter(df1['pH'], df1['alcohol'], alpha=0.5, \
c=df1['quality'], cmap='coolwarm')
plt.colorbar(label='quality')
plt.xlabel('pH')
plt.ylabel('alcohol')
plt.grid()
cmap = plt.get_cmap('coolwarm')
colors = [cmap(c / 5) for c in np.arange(1, 6)]
fig, ax = plt.subplots(1, 1)
for i, (key, group) in enumerate(df1.groupby('quality')):
group.plot(kind='scatter', x=u'pH', y=u'alcohol', color=cmap(i / 5), ax=ax, label=key, alpha=0.5, grid=True)
dic = {'red': ((0, 0, 0), (0.5, 1, 1), (1, 1, 1)),
'green': ((0, 0, 0), (0.5, 1, 1), (1, 0, 0)),
'blue': ((0, 1, 1), (0.5, 0, 0), (1, 0, 0))}
tricolor_cmap = LinearSegmentedColormap('tricolor', dic)
plt.scatter(df1['pH'], df1['alcohol'], alpha=0.5, \
c=df1['quality'], cmap=tricolor_cmap)
plt.colorbar(label='quality')
plt.xlabel('pH')
plt.ylabel('alcohol')
plt.grid()
cmap = tricolor_cmap
colors = [cmap(c / 5) for c in np.arange(1, 6)]
fig, ax = plt.subplots(1, 1)
for i, (key, group) in enumerate(df1.groupby('quality')):
group.plot(kind='scatter', x=u'pH', y=u'alcohol', color=cmap(i / 5), ax=ax, label=key, alpha=0.5, grid=True)
pd.plotting.scatter_matrix(df1.dropna(axis=1)[df1.columns[:]], figsize=(20, 20))
plt.show()
cmap = plt.get_cmap('coolwarm')
colors = [cmap((c - 3)/ 5) for c in df1['quality'].tolist()]
pd.plotting.scatter_matrix(df1.dropna(axis=1)[df1.columns[:]], figsize=(20, 20), color=colors)
plt.show()
cmap = tricolor_cmap
colors = [cmap((c - 3)/ 5) for c in df1['quality'].tolist()]
pd.plotting.scatter_matrix(df1.dropna(axis=1)[df1.columns[:]], figsize=(20, 20), color=colors)
plt.show()
pd.DataFrame(np.corrcoef(df1.T.dropna().to_numpy()),
             columns=df1.columns, index=df1.columns)
corrcoef = np.corrcoef(df1.dropna().T.to_numpy())
#plt.figure(figsize=(8, 8))
plt.imshow(corrcoef, interpolation='nearest', cmap=plt.cm.coolwarm)
plt.colorbar(label='correlation coefficient')
tick_marks = np.arange(len(corrcoef))
plt.xticks(tick_marks, df1.columns, rotation=90)
plt.yticks(tick_marks, df1.columns)
plt.tight_layout()
dfs = df1.apply(lambda x: (x-x.mean())/x.std(), axis=0).fillna(0)
dfs.head() # show only the first 5 rows
pca = PCA()
pca.fit(dfs.iloc[:, :10])
# Map the data into the principal component space (dimensionality reduction)
feature = pca.transform(dfs.iloc[:, :10])
#plt.figure(figsize=(6, 6))
plt.scatter(feature[:, 0], feature[:, 1], alpha=0.5)
plt.title('Principal Component Analysis')
plt.xlabel('The first principal component')
plt.ylabel('The second principal component')
plt.grid()
plt.show()
# Plot the cumulative contribution ratio
plt.gca().get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
plt.plot([0] + list(np.cumsum(pca.explained_variance_ratio_)), '-o')
plt.xlabel('Number of principal components')
plt.ylabel('Cumulative contribution ratio')
plt.grid()
plt.show()
pca = PCA()
pca.fit(dfs.iloc[:, :10])
# Map the data into the principal component space (dimensionality reduction)
feature = pca.transform(dfs.iloc[:, :10])
#plt.figure(figsize=(6, 6))
plt.scatter(feature[:, 0], feature[:, 1], alpha=0.5, color=colors)
plt.title('Principal Component Analysis')
plt.xlabel('The first principal component')
plt.ylabel('The second principal component')
plt.grid()
plt.show()
pca = PCA()
pca.fit(dfs.iloc[:, :10].T)
# Map the data into the principal component space (dimensionality reduction)
feature = pca.transform(dfs.iloc[:, :10].T)
#plt.figure(figsize=(6, 6))
for x, y, name in zip(feature[:, 0], feature[:, 1], dfs.columns[:10]):
plt.text(x, y, name, alpha=0.8, size=8)
plt.scatter(feature[:, 0], feature[:, 1], alpha=0.5)
plt.title('Principal Component Analysis')
plt.xlabel('The first principal component')
plt.ylabel('The second principal component')
plt.grid()
plt.show()
# Plot the cumulative contribution ratio
plt.gca().get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
plt.plot([0] + list(np.cumsum(pca.explained_variance_ratio_)), '-o')
plt.xlabel('Number of principal components')
plt.ylabel('Cumulative contribution ratio')
plt.grid()
plt.show()
# Exercise 3.1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3 STYLE="background
Step2: <h3 STYLE="background
Step3: <h3 STYLE="background
Step4: The points can be colored with matplotlib's predefined colormaps. In the next example, coloring follows coolwarm according to quality. Examples of other colormaps are at http
Step5: There are several ways to draw a similar picture; for example, the following gives a subtly different finish.
Step6: Since quality here is a discrete value rather than a continuous one, the following way of drawing may be better.
Step7: If none of the existing colormaps suits you, you can also build your own, as follows.
Step8: <h3 STYLE="background
Step9: The points can be colored with matplotlib's predefined colormaps. In the next example, coloring follows coolwarm according to quality. Examples of other colormaps are at http
Step10: As before, a custom colormap can also be used.
Step11: <h3 STYLE="background
Step12: A table full of numbers like the one above makes the overall picture hard to grasp, so let's visualize it as a colormap.
Step13: We can see, among other things, that quality is positively correlated with alcohol and negatively correlated with volatile acidity (a small example appears at the end of this description).
Step14: We perform principal component analysis using PCA from the machine learning library sklearn.
Step15: Principal component analysis works with linear combinations of the individual variables as principal components, so we need a measure of how much of the original data each principal component explains. This is called the contribution ratio (explained variance ratio). Accumulating the contribution ratios from the first principal component onward gives the cumulative contribution ratio (a small example appears at the end of this description).
Step16: This, too, can be colored with whatever colors you like.
Step17: By transposing the matrix with .T, the rows and columns are swapped and the principal component analysis can be run on the variables instead.
Step18: <h4 style="padding
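For the correlations mentioned in Step13, here is a compact check; a sketch assuming df1 from the cells above (pandas corr() computes the same Pearson coefficients as the np.corrcoef call):
print(df1.corr()['quality'].drop('quality').sort_values())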
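For the contribution ratios described in Step15, the fitted PCA object exposes them directly; a sketch assuming pca and np from the cells above:
for i, ratio in enumerate(pca.explained_variance_ratio_, start=1):
    print('PC{}: contribution {:.3f}'.format(i, ratio))
print('cumulative:', np.cumsum(pca.explained_variance_ratio_))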
|
7,213
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
from pprint import pprint
import shutil
import subprocess
import urllib.request
import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm
from matplotlib.patches import Rectangle
import openmc.data
openmc.data.atomic_mass('Fe54')
openmc.data.NATURAL_ABUNDANCE['H2']
openmc.data.atomic_weight('C')
url = 'https://anl.box.com/shared/static/kxm7s57z3xgfbeq29h54n7q6js8rd11c.ace'
filename, headers = urllib.request.urlretrieve(url, 'gd157.ace')
# Load ACE data into object
gd157 = openmc.data.IncidentNeutron.from_ace('gd157.ace')
gd157
total = gd157[1]
total
total.xs
total.xs['294K'](1.0)
total.xs['294K']([1.0, 2.0, 3.0])
gd157.energy
energies = gd157.energy['294K']
total_xs = total.xs['294K'](energies)
plt.loglog(energies, total_xs)
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
pprint(list(gd157.reactions.values())[:10])
n2n = gd157[16]
print('Threshold = {} eV'.format(n2n.xs['294K'].x[0]))
n2n.xs
xs = n2n.xs['294K']
plt.plot(xs.x, xs.y)
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
plt.xlim((xs.x[0], xs.x[-1]))
n2n.products
neutron = n2n.products[0]
neutron.distribution
dist = neutron.distribution[0]
dist.energy_out
for e_in, e_out_dist in zip(dist.energy[::5], dist.energy_out[::5]):
plt.semilogy(e_out_dist.x, e_out_dist.p, label='E={:.2f} MeV'.format(e_in/1e6))
plt.ylim(top=1e-6)
plt.legend()
plt.xlabel('Outgoing energy (eV)')
plt.ylabel('Probability/eV')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
cm = matplotlib.cm.Spectral_r
# Determine size of probability tables
urr = gd157.urr['294K']
n_energy = urr.table.shape[0]
n_band = urr.table.shape[2]
for i in range(n_energy):
# Get bounds on energy
if i > 0:
e_left = urr.energy[i] - 0.5*(urr.energy[i] - urr.energy[i-1])
else:
e_left = urr.energy[i] - 0.5*(urr.energy[i+1] - urr.energy[i])
if i < n_energy - 1:
e_right = urr.energy[i] + 0.5*(urr.energy[i+1] - urr.energy[i])
else:
e_right = urr.energy[i] + 0.5*(urr.energy[i] - urr.energy[i-1])
for j in range(n_band):
# Determine maximum probability for a single band
max_prob = np.diff(urr.table[i,0,:]).max()
# Determine bottom of band
if j > 0:
xs_bottom = urr.table[i,1,j] - 0.5*(urr.table[i,1,j] - urr.table[i,1,j-1])
value = (urr.table[i,0,j] - urr.table[i,0,j-1])/max_prob
else:
xs_bottom = urr.table[i,1,j] - 0.5*(urr.table[i,1,j+1] - urr.table[i,1,j])
value = urr.table[i,0,j]/max_prob
# Determine top of band
if j < n_band - 1:
xs_top = urr.table[i,1,j] + 0.5*(urr.table[i,1,j+1] - urr.table[i,1,j])
else:
xs_top = urr.table[i,1,j] + 0.5*(urr.table[i,1,j] - urr.table[i,1,j-1])
# Draw rectangle with appropriate color
ax.add_patch(Rectangle((e_left, xs_bottom), e_right - e_left, xs_top - xs_bottom,
color=cm(value)))
# Overlay total cross section
ax.plot(gd157.energy['294K'], total.xs['294K'](gd157.energy['294K']), 'k')
# Make plot pretty and labeled
ax.set_xlim(1.0, 1.0e5)
ax.set_ylim(1e-1, 1e4)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('Energy (eV)')
ax.set_ylabel('Cross section(b)')
gd157.export_to_hdf5('gd157.h5', 'w')
gd157_reconstructed = openmc.data.IncidentNeutron.from_hdf5('gd157.h5')
np.all(gd157[16].xs['294K'].y == gd157_reconstructed[16].xs['294K'].y)
h5file = h5py.File('gd157.h5', 'r')
main_group = h5file['Gd157/reactions']
for name, obj in sorted(list(main_group.items()))[:10]:
if 'reaction_' in name:
print('{}, {}'.format(name, obj.attrs['label'].decode()))
n2n_group = main_group['reaction_016']
pprint(list(n2n_group.values()))
n2n_group['294K/xs'][()]
# Download ENDF file
url = 'https://t2.lanl.gov/nis/data/data/ENDFB-VII.1-neutron/Gd/157'
filename, headers = urllib.request.urlretrieve(url, 'gd157.endf')
# Load into memory
gd157_endf = openmc.data.IncidentNeutron.from_endf(filename)
gd157_endf
elastic = gd157_endf[2]
elastic.xs
elastic.xs['0K'](0.0253)
gd157_endf.resonances.ranges
[(r.energy_min, r.energy_max) for r in gd157_endf.resonances.ranges]
# Create log-spaced array of energies
resolved = gd157_endf.resonances.resolved
energies = np.logspace(np.log10(resolved.energy_min),
np.log10(resolved.energy_max), 1000)
# Evaluate elastic scattering xs at energies
xs = elastic.xs['0K'](energies)
# Plot cross section vs energies
plt.loglog(energies, xs)
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
resolved.parameters.head(10)
gd157.add_elastic_0K_from_endf('gd157.endf')
gd157[2].xs
# Download ENDF file
url = 'https://t2.lanl.gov/nis/data/data/ENDFB-VII.1-neutron/H/2'
filename, headers = urllib.request.urlretrieve(url, 'h2.endf')
# Run NJOY to create deuterium data
h2 = openmc.data.IncidentNeutron.from_njoy('h2.endf', temperatures=[300., 400., 500.], stdout=True)
h2[2].xs
url = 'https://github.com/mit-crpg/WMP_Library/releases/download/v1.1/092238.h5'
filename, headers = urllib.request.urlretrieve(url, '092238.h5')
u238_multipole = openmc.data.WindowedMultipole.from_hdf5('092238.h5')
u238_multipole(1.0, 294)
E = np.linspace(5, 25, 1000)
plt.semilogy(E, u238_multipole(E, 293.606)[1])
E = np.linspace(6.1, 7.1, 1000)
plt.semilogy(E, u238_multipole(E, 0)[1])
plt.semilogy(E, u238_multipole(E, 900)[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Physical Data
Step2: The IncidentNeutron class
Step3: Cross sections
Step4: Cross sections for each reaction can be stored at multiple temperatures. To see what temperatures are available, we can look at the reaction's xs attribute.
Step5: To find the cross section at a particular energy, 1 eV for example, simply get the cross section at the appropriate temperature and then call it as a function. Note that our nuclear data uses eV as the unit of energy.
Step6: The xs attribute can also be called on an array of energies.
Step7: A quick way to plot cross sections is to use the energy attribute of IncidentNeutron. This gives an array of all the energy values used in cross section interpolation for each temperature present.
Step8: Reaction Data
Step9: Let's suppose we want to look more closely at the (n,2n) reaction. This reaction has an energy threshold
Step10: The (n,2n) cross section, like all basic cross sections, is represented by the Tabulated1D class. The energy and cross section values in the table can be directly accessed with the x and y attributes. Using the x and y has the nice benefit of automatically accounting for reaction thresholds.
Step11: To get information on the energy and angle distribution of the neutrons emitted in the reaction, we need to look at the products attribute.
Step12: We see that the neutrons emitted have a correlated angle-energy distribution. Let's look at the energy_out attribute to see what the outgoing energy distributions are.
Step13: Here we see we have a tabulated outgoing energy distribution for each incoming energy. Note that the same probability distribution classes that we could use to create a source definition are also used within the openmc.data package. Let's plot every fifth distribution to get an idea of what they look like.
Step14: Unresolved resonance probability tables
Step15: Exporting HDF5 data
Step16: With few exceptions, the HDF5 file encodes the same data as the ACE file.
Step17: And one of the best parts of using HDF5 is that it is a widely used format with lots of third-party support. You can use h5py, for example, to inspect the data.
Step18: So we see that the hierarchy of data within the HDF5 mirrors the hierarchy of Python objects that we manipulated before.
Step19: Working with ENDF files
Step20: Just as before, we can get a reaction by indexing the object directly
Step21: However, if we look at the cross section now, we see that it isn't represented as tabulated data anymore.
Step22: If you had Cython installed when you built/installed OpenMC, you should be able to evaluate resonant cross sections from ENDF data directly, i.e., OpenMC will reconstruct resonances behind the scenes for you.
Step23: When data is loaded from an ENDF file, there is also a special resonances attribute that contains resolved and unresolved resonance region data (from MF=2 in an ENDF file).
Step24: We see that $^{157}$Gd has a resolved resonance region represented in the Reich-Moore format as well as an unresolved resonance region. We can look at the min/max energy of each region by doing the following
Step25: With knowledge of the energy bounds, let's create an array of energies over the entire resolved resonance range and plot the elastic scattering cross section.
Step26: Resonance ranges also have a useful parameters attribute that shows the energies and widths for resonances.
Step27: Heavy-nuclide resonance scattering
Step28: Let's check to make sure that we have both the room-temperature elastic scattering cross section and a 0 K cross section.
Step29: Generating data from NJOY
Step30: Now we can use our h2 object just as we did before.
Step31: Note that 0 K elastic scattering data is automatically added when using from_njoy() so that resonance elastic scattering treatments can be used.
Step32: The WindowedMultipole object can be called with energy and temperature values. Calling the object gives a tuple of 3 cross sections
Step33: An array can be passed for the energy argument.
Step34: The real advantage to multipole is that it can be used to generate cross sections at any temperature. For example, this plot shows the Doppler broadening of the 6.67 eV resonance between 0 K and 900 K.
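A sketch of that temperature sweep, reusing the u238_multipole object and the same tuple index plotted in the cells above (the intermediate temperatures are only illustrative):
E = np.linspace(6.1, 7.1, 1000)
for T in [300.0, 600.0, 900.0, 1200.0]:
    plt.semilogy(E, u238_multipole(E, T)[1], label='{:.0f} K'.format(T))
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
plt.legend()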
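For the HDF5 hierarchy noted in Step18, h5py can list every group and dataset path in one call; a sketch assuming the h5file handle opened above is still available:
h5file.visit(print)  # prints paths such as Gd157/reactions/reaction_016/294K/xs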
|
7,214
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inpe', 'sandbox-2', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
7,215
|
<ASSISTANT_TASK:>
Python Code::
from sklearn.cluster import KMeans
# Step 1: Initialise kmeans clustering model for 5 clusters and
# fit on training data
k_means = KMeans(n_clusters=5,
random_state=101)
k_means.fit(X_train)
# Step 2: Predict cluster for training and test data and add results
# as a column to the respective dataframes
X_train['cluster'] = k_means.predict(X_train)
X_test['cluster'] = k_means.predict(X_test)
# Step 3: Print out cluster center arrays and inertia value
print('Cluster Centers:', k_means.cluster_centers_)
print('Inertia:', k_means.inertia_)
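# Optional sketch, not part of the original task: inertia is easiest to interpret
# when compared across several values of k (the "elbow" method). X_train is
# assumed to be a pandas DataFrame (it is indexed by column name above); the
# 'cluster' column added in Step 2 is dropped so only the original features are used.
X_features = X_train.drop(columns='cluster')
inertia_by_k = {k: KMeans(n_clusters=k, random_state=101).fit(X_features).inertia_
                for k in range(2, 11)}
print('Inertia by k:', inertia_by_k)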
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,216
|
<ASSISTANT_TASK:>
Python Code:
# Import py_entitymatching package
import py_entitymatching as em
import os
import pandas as pd
# Get the datasets directory
datasets_dir = em.get_install_path() + os.sep + 'datasets'
# Get the paths of the input tables
path_A = datasets_dir + os.sep + 'person_table_A.csv'
path_B = datasets_dir + os.sep + 'person_table_B.csv'
# Read the CSV files and set 'ID' as the key attribute
A = em.read_csv_metadata(path_A, key='ID')
B = em.read_csv_metadata(path_B, key='ID')
# Get features
feature_table = em.get_features_for_blocking(A, B)
type(feature_table)
feature_table.head()
# Drop first row
feature_table = feature_table.drop(0)
feature_table.head()
# Remove all features except those involving name (i.e. keep only the features where the left attribute is 'name')
feature_table = feature_table[feature_table.left_attribute=='name']
feature_table
# Remove all features except those involving jaccard (i.e. keep only the features where the sim function is 'jaccard')
feature_table = feature_table[feature_table.simfunction=='jaccard']
feature_table
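# Optional, hedged sketch (not in the original steps): a trimmed feature table like
# this is typically handed to a rule-based blocker. The feature name used in the
# rule below is an assumption about what get_features_for_blocking generates for
# 'name' with jaccard over 3-grams; inspect feature_table['feature_name'] for the
# exact name produced in your run before using it.
rb = em.RuleBasedBlocker()
rb.add_rule(['name_name_jac_qgm_3_qgm_3(ltuple, rtuple) < 0.4'], feature_table)
C = rb.block_tables(A, B, l_output_attrs=['name'], r_output_attrs=['name'])
C.head()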
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then, read the (sample) input tables for blocking purposes
Step2: Removing Features from Feature Table
|
7,217
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'snu', 'sandbox-1', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
7,218
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import pydotplus
import numpy as np
import pprint
from sklearn import metrics
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import tree
from sklearn.tree import _tree
from IPython.display import display, Image
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from functools import reduce
# Import our custom utilities
from imp import reload
from utils import utils
reload(utils)
%timeit
X_train, X_test, y_train, y_test, rf = utils.generate_rf_example(sklearn_ds = load_breast_cancer()
, train_split_propn = 0.9
, n_estimators = 3
, random_state_split = 2017
, random_state_classifier = 2018)
print("Training feature dimensions", X_train.shape, sep = ":\n")
print("\n")
print("Training outcome dimensions", y_train.shape, sep = ":\n")
print("\n")
print("Test feature dimensions", X_test.shape, sep = ":\n")
print("\n")
print("Test outcome dimensions", y_test.shape, sep = ":\n")
print("\n")
print("first 5 rows of the training set features", X_train[:5], sep = ":\n")
print("\n")
print("first 5 rows of the training set outcomes", y_train[:5], sep = ":\n")
X_train.shape[0]
breast_cancer = load_breast_cancer()
breast_cancer.data.shape[0]
# Import our custom utilities
rf.n_estimators
estimator0 = rf.estimators_[0] # First tree
estimator1 = rf.estimators_[1] # Second tree
estimator2 = rf.estimators_[2] # Third tree
tree_dat0 = utils.getTreeData(X_train = X_train, dtree = estimator0, root_node_id = 0)
tree_dat1 = utils.getTreeData(X_train = X_train, dtree = estimator1, root_node_id = 0)
tree_dat2 = utils.getTreeData(X_train = X_train, dtree = estimator2, root_node_id = 0)
# Now plot the trees individually
utils.draw_tree(decision_tree = estimator0)
utils.prettyPrintDict(inp_dict = tree_dat0)
# Count the number of samples passing through the leaf nodes
sum(tree_dat0['tot_leaf_node_values'])
feature_importances = rf.feature_importances_
std = np.std([dtree.feature_importances_ for dtree in rf.estimators_]
, axis=0)
feature_importances_rank_idx = np.argsort(feature_importances)[::-1]
# Check that the feature importances are standardized to 1
print(sum(feature_importances))
# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
print("%d. feature %d (%f)" % (f + 1
, feature_importances_rank_idx[f]
, feature_importances[feature_importances_rank_idx[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X_train.shape[1])
, feature_importances[feature_importances_rank_idx]
, color="r"
, yerr = std[feature_importances_rank_idx], align="center")
plt.xticks(range(X_train.shape[1]), feature_importances_rank_idx)
plt.xlim([-1, X_train.shape[1]])
plt.show()
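# Optional sketch (not in the original notebook): map the ranked indices back to
# the breast-cancer feature names, which is easier to read than bare indices.
feature_names = load_breast_cancer().feature_names
for rank, idx in enumerate(feature_importances_rank_idx[:10], start=1):
    print("{}. {} ({:.4f})".format(rank, feature_names[idx], feature_importances[idx]))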
# CHECK: If the random forest objects are going to be really large in size
# we could just omit them and only return our custom summary outputs
rf_metrics = utils.getValidationMetrics(rf, y_true = y_test, X_test = X_test)
all_rf_outputs = {"rf_obj" : rf,
"feature_importances" : feature_importances,
"feature_importances_rank_idx" : feature_importances_rank_idx,
"rf_metrics" : rf_metrics}
# CHECK: The following should be parallelized!
# CHECK: Whether we can maintain X_train correctly as required
for idx, dtree in enumerate(rf.estimators_):
dtree_out = utils.getTreeData(X_train = X_train, dtree = dtree, root_node_id = 0)
# Append output to dictionary
all_rf_outputs["dtree" + str(idx)] = dtree_out
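# Hedged sketch (added, not in the original notebook): one way the loop above could be
# parallelized, assuming joblib is available (it ships with scikit-learn) and that
# utils.getTreeData and its arguments are picklable.
from joblib import Parallel, delayed
dtree_outputs = Parallel(n_jobs=-1)(
    delayed(utils.getTreeData)(X_train = X_train, dtree = dtree, root_node_id = 0)
    for dtree in rf.estimators_)
# dtree_outputs[idx] corresponds to all_rf_outputs["dtree" + str(idx)] built above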
utils.prettyPrintDict(inp_dict = all_rf_outputs)
utils.prettyPrintDict(inp_dict = all_rf_outputs['rf_metrics'])
all_rf_outputs['dtree0']
uniq_feature_paths = all_rf_outputs['dtree0']['all_uniq_leaf_paths_features']
leaf_node_classes = all_rf_outputs['dtree0']['all_leaf_node_classes']
ones_only = [i for i, j in zip(uniq_feature_paths, leaf_node_classes)
if j == 1]
ones_only
print("Number of leaf nodes", len(all_rf_outputs['dtree0']['all_uniq_leaf_paths_features']), sep = ":\n")
print("Number of leaf nodes predicting class 1", len(ones_only), sep = ":\n")
# Just pick the last seven cases, we are going to manually construct
# binary RIT of depth 3 i.e. max 2**3 -1 = 7 intersecting nodes
ones_only_seven = ones_only[-7:]
ones_only_seven
# Construct a binary version of the RIT manually!
# This should come in useful for unit tests!
node0 = ones_only_seven[-1]
node1 = np.intersect1d(node0, ones_only_seven[-2])
node2 = np.intersect1d(node1, ones_only_seven[-3])
node3 = np.intersect1d(node1, ones_only_seven[-4])
node4 = np.intersect1d(node0, ones_only_seven[-5])
node5 = np.intersect1d(node4, ones_only_seven[-6])
node6 = np.intersect1d(node4, ones_only_seven[-7])
intersected_nodes_seven = [node0, node1, node2, node3, node4, node5, node6]
for idx, node in enumerate(intersected_nodes_seven):
print("node" + str(idx), node)
rit_output = reduce(np.union1d, (node2, node3, node5, node6))
rit_output
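# Hedged sketch (added, not in the original notebook): a generic version of the manual
# depth-3 construction above. It assumes `paths` holds 2**depth - 1 feature-index arrays
# (e.g. ones_only_seven). The intersection tree has the same shape as the manual build,
# but paths are assigned breadth-first rather than from the end of the list, so the
# exact output can differ from rit_output.
def binary_rit(paths, depth = 3):
    nodes = [np.asarray(paths[0])]                      # root node
    next_path = 1
    for level in range(1, depth):
        parents = nodes[2 ** (level - 1) - 1 : 2 ** level - 1]
        for parent in parents:
            for _ in range(2):                          # two children per parent
                nodes.append(np.intersect1d(parent, paths[next_path]))
                next_path += 1
    leaves = nodes[2 ** (depth - 1) - 1 :]              # the last 2**(depth - 1) nodes
    return reduce(np.union1d, leaves)
binary_rit(ones_only_seven)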
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Check out the data
Step3: Step 2
Step4: Design the single function to get the key tree information
Step5: Decision Tree 0 (First) - Get output
Step6: Step 3
Step7: Step 3.2 Display Feature Importances Graphically (just for interest)
Step8: Putting it all together
Step9: Check the final dictionary of outputs
Step10: Now we can start setting up the RIT class
Step11: Get the leaf node 1's paths
|
7,219
|
<ASSISTANT_TASK:>
Python Code:
from pd_grid import PD_Model
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec
%matplotlib inline
bwr = plt.get_cmap("bwr")
def draw_grid(model, ax=None):
'''
Draw the current state of the grid, with Defecting agents in red
and Cooperating agents in blue.
'''
if not ax:
fig, ax = plt.subplots(figsize=(6,6))
grid = np.zeros((model.grid.width, model.grid.height))
for agent, x, y in model.grid.coord_iter():
if agent.move == "D":
grid[y][x] = 1
else:
grid[y][x] = 0
ax.pcolormesh(grid, cmap=bwr, vmin=0, vmax=1)
ax.axis('off')
ax.set_title("Steps: {}".format(model.schedule.steps))
def run_model(model):
'''
Run an experiment with a given model, and plot the results.
'''
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(212)
draw_grid(model, ax1)
model.run(10)
draw_grid(model, ax2)
model.run(10)
draw_grid(model, ax3)
model.datacollector.get_model_vars_dataframe().plot(ax=ax4)
# Set the random seed
seed = 21
random.seed(seed)
m = PD_Model(50, 50, "Sequential")
run_model(m)
random.seed(seed)
m = PD_Model(50, 50, "Random")
run_model(m)
random.seed(seed)
m = PD_Model(50, 50, "Simultaneous")
run_model(m)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Helper functions
Step2: Sequential Activation
Step3: Random Activation
Step4: Simultaneous Activation
|
7,220
|
<ASSISTANT_TASK:>
Python Code:
import IPython
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os
import sys
import pickle
import scipy as sp
from scipy import stats
from pandas import Series, DataFrame
from datetime import datetime, timedelta
%matplotlib inline
matplotlib.style.use('classic')
matplotlib.rcParams['figure.facecolor'] = 'w'
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 100)
pd.set_option('mode.chained_assignment', None)
print("Python version: {}".format(sys.version))
print("IPython version: {}".format(IPython.__version__))
print("pandas version: {}".format(pd.__version__))
print("matplotlib version: {}".format(matplotlib.__version__))
print("NumPy version: {}".format(np.__version__))
print("SciPy version: {}".format(sp.__version__))
from gb_loader import *
from gb_stats import *
from gb_transform import *
# Topic of the Notebook which will also be the name of the subfolder containing results
TOPIC = 'alarms_2'
# Name of the external hard drive
DRIVE = 'GUSZTI'
# Directory containing clinical and blood gas data
CWD = '/Users/guszti/ventilation_data'
# Directory on external drive to read the ventilation data from
DIR_READ = '/Volumes/%s/ventilation_data' % DRIVE
# Directory to write results and selected images to
if not os.path.isdir('%s/%s/%s' % (CWD, 'Analyses', TOPIC)):
os.makedirs('%s/%s/%s' % (CWD, 'Analyses', TOPIC))
DIR_WRITE = '%s/%s/%s' % (CWD, 'Analyses', TOPIC)
# Images and raw data will be written on an external hard drive
if not os.path.isdir('/Volumes/%s/data_dump/%s' % (DRIVE, TOPIC)):
os.makedirs('/Volumes/%s/data_dump/%s' % (DRIVE, TOPIC))
DATA_DUMP = '/Volumes/%s/data_dump/%s' % (DRIVE, TOPIC)
os.chdir(CWD)
os.getcwd()
DIR_READ
DIR_WRITE
DATA_DUMP
# One recording from each patient, all of them 24 hours old or longer
# The sub folders containing the individual recordings have the same names within cwd
recordings = ['DG001', 'DG002_1', 'DG003', 'DG004', 'DG005_1', 'DG006_2', 'DG007', 'DG008', 'DG009', 'DG010',
'DG011', 'DG013', 'DG014', 'DG015', 'DG016', 'DG017', 'DG018_1', 'DG020',
'DG021', 'DG022', 'DG023', 'DG025', 'DG026', 'DG027', 'DG028', 'DG029', 'DG030',
'DG031', 'DG032_2', 'DG033', 'DG034', 'DG035', 'DG037', 'DG038_1', 'DG039', 'DG040_1', 'DG041',
'DG042', 'DG043', 'DG044', 'DG045', 'DG046_2', 'DG047', 'DG048', 'DG049', 'DG050']
clinical_details = pd.read_excel('%s/data_grabber_patient_data_combined.xlsx' % CWD)
clinical_details.index = clinical_details['Recording']
clinical_details.info()
current_weights = {}
for recording in recordings:
current_weights[recording] = clinical_details.loc[recording, 'Current weight' ] / 1000
slow_measurements = {}
for recording in recordings:
flist = os.listdir('%s/%s' % (DIR_READ, recording))
flist = [file for file in flist if not file.startswith('.')] # There are some hidden
# files on the hard drive starting with '.'; this step is necessary to ignore them
files = slow_measurement_finder(flist)
print('Loading recording %s' % recording)
print(files)
fnames = ['%s/%s/%s' % (DIR_READ, recording, filename) for filename in files]
slow_measurements[recording] = data_loader(fnames)
# 46 recordings from 46 patients (4 recordings excluded as they were < 24 hours long)
len(slow_measurements)
for recording in recordings:
try:
a = slow_measurements[recording]
a['VT_kg'] = a['5001|VT [mL]'] / current_weights[recording]
a['VTi_kg'] = a['5001|VTi [mL]'] / current_weights[recording]
a['VTe_kg'] = a['5001|VTe [mL]'] / current_weights[recording]
a['VTmand_kg'] = a['5001|VTmand [mL]'] / current_weights[recording]
a['VTspon_kg'] = a['5001|VTspon [mL]'] / current_weights[recording]
a['VTimand_kg'] = a['5001|VTimand [mL]'] / current_weights[recording]
a['VTemand_kg'] = a['5001|VTemand [mL]'] / current_weights[recording]
a['VTispon_kg'] = a['5001|VTispon [mL]'] / current_weights[recording]
a['VTespon_kg'] = a['5001|VTespon [mL]'] / current_weights[recording]
except KeyError:
# print('%s does not have all of the parameters' % recording)
pass
for recording in recordings:
try:
a = slow_measurements[recording]
a['VThf_kg'] = a['5001|VThf [mL]'] / current_weights[recording]
a['DCO2_corr_kg'] = a['5001|DCO2 [10*mL^2/s]'] * 10 / (current_weights[recording]) ** 2
except KeyError:
# print('%s does not have all of the parameters' % recording)
pass
for recording in recordings:
try:
a = slow_measurements[recording]
a['MV_kg'] = a['5001|MV [L/min]'] / current_weights[recording]
a['MVi_kg'] = a['5001|MVi [L/min]'] / current_weights[recording]
a['MVe_kg'] = a['5001|MVe [L/min]'] / current_weights[recording]
a['MVemand_kg'] = a['5001|MVemand [L/min]'] / current_weights[recording]
a['MVespon_kg'] = a['5001|MVespon [L/min]'] / current_weights[recording]
a['MVleak_kg'] = a['5001|MVleak [L/min]'] / current_weights[recording]
except KeyError:
# print('%s does not have all of the parameters' % recording)
pass
# 1/sec data are retrieved in two parts which need to be joined
# This resampling step combines the two parts
for recording in recordings:
slow_measurements[recording] = slow_measurements[recording].resample('1S').mean()
# Example
slow_measurements['DG003'].head();
len(recordings)
rec1 = recordings[:15]; rec2 = recordings[15:30]; rec3 = recordings[30:40]; rec4 = recordings[40:]
# Time stamps are obtained from 'slow measurements'
recording_duration = {}
for recording in recordings:
recording_duration[recording] = slow_measurements[recording].index[-1] - slow_measurements[recording].index[0]
recording_duration_seconds = {}
recording_duration_hours = {}
for recording in recordings:
temp = recording_duration[recording]
recording_duration_seconds[recording] = temp.total_seconds()
recording_duration_hours[recording] = temp.total_seconds() / 3600
v = list(range(1, len(recordings)+1))
w = [value for key, value in sorted(recording_duration_hours.items()) if key in recordings]
fig = plt.figure()
fig.set_size_inches(20, 10)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
ax1 = fig.add_subplot(1, 1, 1);
ax1.bar(v, w, color = 'blue')
plt.xlabel("Recordings", fontsize = 22)
plt.ylabel("Hours", fontsize = 22)
plt.title("Recording periods" , fontsize = 22)
plt.yticks(fontsize = 22)
plt.xticks([i+1.5 for i, _ in enumerate(recordings)], recordings, fontsize = 22, rotation = 'vertical');
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'recording_durations.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
recording_times_frame = DataFrame([recording_duration, recording_duration_hours, recording_duration_seconds],
index = ['days', 'hours', 'seconds'])
recording_times_frame
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'recording_periods.xlsx'))
recording_times_frame.to_excel(writer,'rec_periods')
writer.save()
vent_settings = {}
for recording in recordings:
flist = os.listdir('%s/%s' % (DIR_READ, recording))
flist = [file for file in flist if not file.startswith('.')] # There are some hidden
# files on the hard drive starting with '.'; this step is necessary to ignore them
files = slow_setting_finder(flist)
# print('Loading recording %s' % recording)
# print(files)
fnames = ['%s/%s/%s' % (DIR_READ, recording, filename) for filename in files]
vent_settings[recording] = data_loader(fnames)
# remove less important ventilator settings to simplify the table
vent_settings_selected = {}
for recording in recordings:
vent_settings_selected[recording] = vent_settings_cleaner(vent_settings[recording])
# Create another dictionary of DataFrames with some of the ventilation settings (set VT, set RR, set Pmax)
lsts = [(['VT_weight'], ['VTi', 'VThf']), (['RR_set'], ['RR']), (['Pmax'], ['Pmax', 'Ampl hf max'])]
vent_settings_2 = {}
for recording in recordings:
frmes = []
for name, pars in lsts:
if pars in [['VTi', 'VThf']]:
ind = []
val = []
for index, row in vent_settings_selected[recording].iterrows():
if row['Id'] in pars:
ind.append(index)
val.append(row['Value New'] / current_weights[recording])
frmes.append(DataFrame(val, index = ind, columns = name))
else:
ind = []
val = []
for index, row in vent_settings_selected[recording].iterrows():
if row['Id'] in pars:
ind.append(index)
val.append(row['Value New'])
frmes.append(DataFrame(val, index = ind, columns = name))
vent_settings_2[recording] = pd.concat(frmes)
vent_settings_2[recording].drop_duplicates(inplace = True)
vent_modes = {}
for recording in recordings:
flist = os.listdir('%s/%s' % (DIR_READ, recording))
flist = [file for file in flist if not file.startswith('.')] # There are some hidden
# files on the hard drive starting with '.'; this step is necessary to ignore them
files = slow_text_finder(flist)
# print('Loading recording %s' % recording)
# print(files)
fnames = ['%s/%s/%s' % (DIR_READ, recording, filename) for filename in files]
vent_modes[recording] = data_loader(fnames)
# remove less important ventilator mode settings to simplify the table
vent_modes_selected = {}
for recording in recordings:
vent_modes_selected[recording] = vent_mode_cleaner(vent_modes[recording])
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'ventilator_settings.xlsx'))
for recording in recordings:
vent_settings[recording].to_excel(writer,'%s' % recording)
writer.save()
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'ventilator_settings_selected.xlsx'))
for recording in recordings:
vent_settings_selected[recording].to_excel(writer,'%s' % recording)
writer.save()
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'ventilator_settings_2.xlsx'))
for recording in recordings:
vent_settings_2[recording].to_excel(writer,'%s' % recording)
writer.save()
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'ventilator_modes.xlsx'))
for recording in recordings:
vent_modes[recording].to_excel(writer,'%s' % recording)
writer.save()
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'ventilator_modes_selected.xlsx'))
for recording in recordings:
vent_modes_selected[recording].to_excel(writer,'%s' % recording)
writer.save()
alarm_settings = {}
for recording in recordings:
flist = os.listdir('%s/%s' % (DIR_READ, recording))
flist = [file for file in flist if not file.startswith('.')] # There are some hidden
# files on the hard drive starting with '.'; this step is necessary to ignore them
files = alarm_setting_finder(flist)
# print('Loading recording %s' % recording)
# print(files)
fnames = ['%s/%s/%s' % (DIR_READ, recording, filename) for filename in files]
alarm_settings[recording] = data_loader(fnames)
# Remove etCO2 limits which were not used
alarm_settings_selected = {}
for recording in recordings:
alarm_settings_selected[recording] = alarm_setting_cleaner(alarm_settings[recording])
# Create another dictionary of DataFrames with some of the alarm settings
lsts = [(['MV_high_weight'], ['MVe_HL']), (['MV_low_weight'], ['MVe_LL']),
(['PIP_high'], ['PIP_HL']), (['RR_high'], ['RR_HL'])]
alarm_settings_2 = {}
for recording in recordings:
frmes = []
for name, pars in lsts:
if pars in [['MVe_HL'], ['MVe_LL']]:
ind = []
val = []
for index, row in alarm_settings_selected[recording].iterrows():
if row['Id'] in pars:
ind.append(index)
val.append(row['Value New'] / current_weights[recording])
frmes.append(DataFrame(val, index = ind, columns = name))
else:
ind = []
val = []
for index, row in alarm_settings_selected[recording].iterrows():
if row['Id'] in pars:
ind.append(index)
val.append(row['Value New'])
frmes.append(DataFrame(val, index = ind, columns = name))
alarm_settings_2[recording] = pd.concat(frmes)
alarm_settings_2[recording].drop_duplicates(inplace = True)
# Write DataFrames containing alarm settings to a multisheet Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'alarm_settings.xlsx'))
for recording in recordings:
alarm_settings[recording].to_excel(writer,'%s' % recording)
writer.save()
# Write DataFrames containing alarm settings to a multisheet Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'alarm_settings_2.xlsx'))
for recording in recordings:
alarm_settings_2[recording].to_excel(writer,'%s' % recording)
writer.save()
alarm_states = {}
for recording in recordings:
flist = os.listdir('%s/%s' % (DIR_READ, recording))
flist = [file for file in flist if not file.startswith('.')] # There are some hidden
# files on the hard drive starting with '.'; this step is necessary to ignore them
files = alarm_state_finder(flist)
# print('Loading recording %s' % recording)
# print(files)
fnames = ['%s/%s/%s' % (DIR_READ, recording, filename) for filename in files]
alarm_states[recording] = data_loader(fnames)
# Write DataFrames containing alarm states to a multisheet Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'alarm_states.xlsx'))
for recording in recordings:
alarm_states[recording].to_excel(writer,'%s' % recording)
writer.save()
total_recording_time = timedelta(0)
for recording in recordings:
total_recording_time += recording_duration[recording]
total_recording_time
mean_recording_time = total_recording_time / len(recordings)
mean_recording_time
# Define function to retrieve alarm events from alarm timing data
def alarm_events_calculator(dframe, al):
'''
DataFrame, str -> DataFrame
dframe: DataFrame containing alarm states
al: alarm category (string)
    Returns a pd.DataFrame object with the time stamps when alarm 'al' went off and the duration
    (in seconds) of each alarm event (from 'Active' to the next 'NotActive' state) found in dframe
'''
alarms = dframe
alarm = alarms[alarms.Name == al]
length = len(alarm)
delta = np.array([(alarm.Date_Time[i] - alarm.Date_Time[i-1]).total_seconds()
for i in range(1, length) if alarm['State New'][i] == 'NotActive' and alarm['State New'][i-1] == 'Active'])
stamp = np.array([alarm.index[i-1]
for i in range(1, length) if alarm['State New'][i] == 'NotActive' and alarm['State New'][i-1] == 'Active'])
data = {'duration_seconds': delta, 'time_went_off': stamp,}
alarm_t = DataFrame(data, columns = ['time_went_off', 'duration_seconds'])
return alarm_t
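# Hedged sketch (added, not part of the original analysis): a tiny synthetic example of the
# Active -> NotActive pairing logic used by alarm_events_calculator above. The column names
# ('Name', 'State New', 'Date_Time') are taken from the function; the alarm name and the
# timestamps below are made up.
_example_times = pd.to_datetime(['2016-01-01 00:00:00', '2016-01-01 00:00:12',
                                 '2016-01-01 00:05:00', '2016-01-01 00:05:30'])
_example_states = DataFrame({'Name': ['Apnoea'] * 4,
                             'State New': ['Active', 'NotActive', 'Active', 'NotActive'],
                             'Date_Time': _example_times},
                            index = _example_times)
# alarm_events_calculator(_example_states, 'Apnoea') should return two events lasting
# 12 and 30 seconds, given the positional Series indexing the function relies on.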
# Create a list of alarms occurring during each recording
alarm_list = {}
for recording in recordings:
alarm_list[recording] = sorted(set(alarm_states[recording].Name))
alarm_events = {}
for recording in recordings:
alarm_events[recording] = {}
for alarm in alarm_list[recording]:
alarm_events[recording][alarm] = alarm_events_calculator(alarm_states[recording], alarm)
# Write Dataframes containing the alarm events in Excel files,
# one Excel file for each recording
for recording in recordings:
writer = pd.ExcelWriter('%s/%s%s' % (DIR_WRITE, recording, '_alarm_events.xlsx'))
for alarm in alarm_list[recording]:
alarm_events[recording][alarm].to_excel(writer, alarm[:20])
writer.save()
def alarm_stats_calculator(dframe, rec, al):
'''
dframe: DataFrame containing alarm events
rec: recording (string)
al: alarm (string)
Returns detailed statistics about a particular alarm (al) in a particular recording (rec);
    - number of times the alarm went off and that count normalized to a 24-hour period
    - mean, median, standard deviation, minimum, 25th centile, 75th centile and maximum duration
      (in seconds) of the alarm being active
    - the total time the alarm was active, in seconds and as a percentage of the total recording time
'''
alarm = dframe[al].duration_seconds
return (alarm.size, round((alarm.size / (recording_duration_hours[rec] / 24)), 1),
round(alarm.mean() , 1), round(alarm.median(), 1), round(alarm.std(), 1), round(alarm.min() , 1),
round(alarm.quantile(0.25), 1), round(alarm.quantile(0.75), 1), round(alarm.max(), 1),
round(alarm.sum(), 1), round(alarm.sum() * 100 / recording_duration_seconds[rec] ,3))
alarm_stats = {}
for recording in recordings:
alarm_stats[recording] = {}
for alarm in alarm_list[recording]:
data = alarm_stats_calculator(alarm_events[recording], recording, alarm)
frame = DataFrame([data], columns = ['number of events', 'number of event per 24h',
'mean duration (s)', 'median duration (s)', 'SD duration (s)',
                                             'minimum duration (s)',
'duration 25th centile (s)', 'duration 75th centile (s)',
'maximum duration (s)', 'cumulative duration (s)',
'percentage of recording length (%)'], index = [alarm])
alarm_stats[recording][alarm] = frame
# Write descriptive statistics in a multisheet Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'alarm_stats.xlsx'))
for recording in recordings:
stats = []
for alarm in alarm_stats[recording]:
stats.append(alarm_stats[recording][alarm])
stats_all = pd.concat(stats)
stats_all.to_excel(writer, recording)
writer.save()
# Generates a plot with the cumulative times (in seconds) of the various alarms occurring during recording (rec).
# Displays the plot
def alarm_plot_1(rec):
fig = plt.figure()
fig.set_size_inches(25, 8)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1)
xs = [i + 0.1 for i, _ in enumerate(alarm_list[rec])]
stats = []
for alarm in alarm_list[rec]:
stats.append(alarm_stats[rec][alarm]['cumulative duration (s)'])
stats_all = pd.concat(stats)
plt.barh(xs, stats_all, color = 'red')
plt.xlabel("seconds", fontsize = 24)
plt.title("Recording %s : How long was the alarm active over the %d seconds of recording?" % (rec,
recording_duration_seconds[rec]), fontsize = 22)
plt.yticks([i + 0.5 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 22)
plt.xticks(fontsize = 20)
# Generates a plot with the cumulative times (in seconds) of the various alarms occurring during recording (rec).
# Does not display the plot but writes it to a jpg file.
# NB: the resolution of the image is only 100 dpi - a higher resolution is needed for publication quality
def alarm_plot_1_write(rec):
fig = plt.figure()
fig.set_size_inches(25, 8)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1)
xs = [i + 0.1 for i, _ in enumerate(alarm_list[rec])]
stats = []
for alarm in alarm_list[rec]:
stats.append(alarm_stats[rec][alarm]['cumulative duration (s)'])
stats_all = pd.concat(stats)
plt.barh(xs, stats_all, color = 'red')
plt.xlabel("seconds", fontsize = 24)
plt.title("Recording %s : How long was the alarm active over the %d seconds of recording?" % (rec,
recording_duration_seconds[rec]), fontsize = 22)
plt.yticks([i + 0.5 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 22)
plt.xticks(fontsize = 20)
    fig.savefig('%s/%s_%s.jpg' % (DIR_WRITE, 'alarm_durations_1', rec), dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
plt.close(fig)
# Generates a plot with the cumulative times (expressed as percentage of the total recording time)
# of the various alarms occurring during recording (rec).
# Displays the plot
def alarm_plot_2(rec):
fig = plt.figure()
fig.set_size_inches(25, 8)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1)
xs = [i + 0.1 for i, _ in enumerate(alarm_list[rec])]
stats = []
for alarm in alarm_list[rec]:
stats.append(alarm_stats[rec][alarm]['percentage of recording length (%)'])
stats_all = pd.concat(stats)
plt.barh(xs, stats_all, color = 'red')
plt.xlabel("% of total recording time", fontsize = 24)
    plt.title("Recording %s: How long was the alarm active over the %s hours of recording?" % (rec,
str(recording_duration[rec])), fontsize = 22)
plt.yticks([i + 0.5 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 22)
plt.xticks(fontsize = 20)
# Generates a plot with the cumulative times (expressed as percentage of the total recording time)
# of the various alarms occurring during recording (rec).
# Does not display the plot but writes it to a jpg file.
# NB: the resolution of the image is only 100 dpi - a higher resolution is needed for publication quality
def alarm_plot_2_write(rec):
fig = plt.figure()
fig.set_size_inches(25, 8)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1)
xs = [i + 0.1 for i, _ in enumerate(alarm_list[rec])]
stats = []
for alarm in alarm_list[rec]:
stats.append(alarm_stats[rec][alarm]['percentage of recording length (%)'])
stats_all = pd.concat(stats)
plt.barh(xs, stats_all, color = 'red')
plt.xlabel("% of total recording time", fontsize = 24)
    plt.title("Recording %s: How long was the alarm active over the %s hours of recording?" % (rec,
str(recording_duration[rec])), fontsize = 22)
plt.yticks([i + 0.5 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 22)
plt.xticks(fontsize = 20)
    fig.savefig('%s/%s_%s.jpg' % (DIR_WRITE, 'alarm_durations_2', rec), dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
plt.close(fig)
# Displays the individual alarm events of the recording (rec) along the time axis
# Displays the plot
def alarm_plot_3(rec):
alarm_state = alarm_states[rec]
numbered = Series(np.zeros(len(alarm_state)), index = alarm_state.index)
for i in range(1, len(alarm_state)):
if alarm_state.iloc[i]['State New'] == 'Active':
numbered[i] = alarm_list[rec].index(alarm_state.iloc[i]['Id']) + 1
fig = plt.figure()
fig.set_size_inches(17, 8)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.plot(alarm_state.index, numbered, '|', color = 'red', markersize = 16, markeredgewidth = 1 )
plt.xlabel("Time", fontsize = 20)
plt.title("Alarm events during recording %s" % rec , fontsize = 24)
plt.yticks([i+1 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 18);
plt.xticks(fontsize = 14, rotation = 30)
plt.ylim(0.5, len(alarm_list[rec]) + 0.5);
# Displays the individual alarm events of recording (rec) along the time axis
# Does not display the plot but writes it to a pdf file.
# NB: the resolution of the image is only 100 dpi - a higher resolution is needed for publication quality
def alarm_plot_3_write(rec):
alarm_state = alarm_states[rec]
numbered = Series(np.zeros(len(alarm_state)), index = alarm_state.index)
for i in range(1, len(alarm_state)):
if alarm_state.iloc[i]['State New'] == 'Active':
numbered[i] = alarm_list[rec].index(alarm_state.iloc[i]['Id']) + 1
fig = plt.figure()
fig.set_size_inches(17, 8)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.plot(alarm_state.index, numbered, '|', color = 'red', markersize = 16, markeredgewidth = 1 )
plt.xlabel("Time", fontsize = 20)
plt.title("Alarm events during recording %s" % rec , fontsize = 24)
plt.yticks([i+1 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 18);
plt.xticks(fontsize = 14, rotation = 30)
plt.ylim(0.5, len(alarm_list[rec]) + 0.5)
    fig.savefig('%s/%s_%s.pdf' % (DIR_WRITE, 'individual_alarms', rec), dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='pdf',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
plt.close(fig)
alarm_plot_1('DG032_2')
alarm_plot_2('DG032_2')
alarm_plot_3('DG032_2')
total_alarm_number_recordings = {} # dictionary containing the total number of alarm events in each recording
for recording in recordings:
total = 0
for alarm in alarm_list[recording]:
total += len(alarm_events[recording][alarm].index)
total_alarm_number_recordings[recording] = total
total_alarm_number_recordings_24H = {} # dictionary containing the total number of alarm events in each recording
# corrected for 24 hour period
for recording in recordings:
total_alarm_number_recordings_24H[recording] = (total_alarm_number_recordings[recording] /
(recording_duration[recording].total_seconds() / 86400))
alarm_durations_recordings = {} # a dictionary of Series. Each series contains all the alarm durations of a recording
for recording in recordings:
durations = []
for alarm in alarm_list[recording]:
durations.append(alarm_events[recording][alarm]['duration_seconds'])
durations = pd.concat(durations)
alarm_durations_recordings[recording] = durations
# Dictionaries containing various descriptive statistics for each recording
mean_alarm_duration_recordings = {}
median_alarm_duration_recordings = {}
sd_alarm_duration_recordings = {}
mad_alarm_duration_recordings = {}
min_alarm_duration_recordings = {}
pc25_alarm_duration_recordings = {}
pc75_alarm_duration_recordings = {}
max_alarm_duration_recordings = {}
for recording in recordings:
mean_alarm_duration_recordings[recording] = round(alarm_durations_recordings[recording].mean(), 4)
median_alarm_duration_recordings[recording] = round(alarm_durations_recordings[recording].median(), 4)
sd_alarm_duration_recordings[recording] = round(alarm_durations_recordings[recording].std(), 4)
mad_alarm_duration_recordings[recording] = round(alarm_durations_recordings[recording].mad(), 4)
min_alarm_duration_recordings[recording] = round(alarm_durations_recordings[recording].min(), 4)
pc25_alarm_duration_recordings[recording] = round(alarm_durations_recordings[recording].quantile(0.25), 4)
pc75_alarm_duration_recordings[recording] = round(alarm_durations_recordings[recording].quantile(0.75), 4)
max_alarm_duration_recordings[recording] = round(alarm_durations_recordings[recording].max(), 4)
# Create DataFrame containing cumulative alarm statistics for each recording
alarm_stats_cum_rec = DataFrame([total_alarm_number_recordings,
total_alarm_number_recordings_24H,
mean_alarm_duration_recordings,
median_alarm_duration_recordings,
sd_alarm_duration_recordings,
mad_alarm_duration_recordings,
min_alarm_duration_recordings,
pc25_alarm_duration_recordings,
pc75_alarm_duration_recordings,
max_alarm_duration_recordings],
index = ['count', 'count per 24h', 'mean duration (sec)', 'median duration (sec)', 'sd duration (sec)',
'mad duration (sec)', 'min duration (sec)', '25th cent duration (sec)', '75th cent duration (sec)',
'max duration (sec)'])
alarm_stats_cum_rec.round(2)
# Write statistics to Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'alarm_stats_cum_rec.xlsx'))
alarm_stats_cum_rec.round(2).to_excel(writer, 'cumulative_stats')
writer.save()
# Plot the absolute number of alarm events for each recording
fig = plt.figure()
fig.set_size_inches(12, 12)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(recordings)+1)), alarm_stats_cum_rec.loc['count', :], color = 'blue')
plt.ylabel("Recordings", fontsize = 22)
plt.xlabel("", fontsize = 22)
plt.title("Number of alarm events" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(recordings)], recordings, rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'number_events_rec.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Plot the number of alarm events in each recording normalised for 24 hour periods
fig = plt.figure()
fig.set_size_inches(12, 12)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(recordings)+1)), alarm_stats_cum_rec.loc['count per 24h', :], color = 'blue')
plt.ylabel("Recordings", fontsize = 22)
plt.xlabel("", fontsize = 22)
plt.title("Number of alarm events per 24 hours" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(recordings)], recordings, rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'number_events_24H_rec.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Median duration of alarm events
fig = plt.figure()
fig.set_size_inches(12, 12)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(recordings)+1)), alarm_stats_cum_rec.loc['median duration (sec)', :], color = 'blue')
plt.ylabel("Recordings", fontsize = 22)
plt.xlabel("seconds", fontsize = 22)
plt.title("Median duration of alarm events" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(recordings)], recordings, rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'median_duration_rec.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Create a list of all alarms occurring in any recording
total_alarm_list = set()
for recording in recordings:
total_alarm_list.update(alarm_list[recording])
total_alarm_list = sorted(total_alarm_list)
# A list of all alarms occurring during the service evaluation
total_alarm_list
# Write alarm list to Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'total_alarm_list.xlsx'))
DataFrame(total_alarm_list, columns = ['alarm categories']).to_excel(writer, 'total_alarm_list')
writer.save()
total_alarm_number_alarms = {} # dictionary containing the number of alarm events in all recordings for the
# various alarm categories
for alarm in total_alarm_list:
total = 0
for recording in recordings:
if alarm in alarm_list[recording]:
total += len(alarm_events[recording][alarm].index)
total_alarm_number_alarms[alarm] = total
# Write alarm list to Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'total_alarm_list_numbers.xlsx'))
DataFrame([total_alarm_number_alarms]).T.to_excel(writer, 'total_alarm_list')
writer.save()
total_alarm_number_alarms_24H = {} # dictionary containing the number of alarm events in all recordings for the
# various alarm categories normalized for 24 hour recording periods
for alarm in total_alarm_list:
total_alarm_number_alarms_24H[alarm] = round(((total_alarm_number_alarms[alarm] /
(total_recording_time.total_seconds() / 86400))), 4)
alarm_durations_alarms = {} # a dictionary of Series. Each Series contains all durations of a particular alarm
# in all recordings
for alarm in total_alarm_list:
durations = []
for recording in recordings:
if alarm in alarm_list[recording]:
durations.append(alarm_events[recording][alarm]['duration_seconds'])
durations = pd.concat(durations)
alarm_durations_alarms[alarm] = durations
cum_alarm_duration_alarms = {} # dictionary containing the total duration of alarms in all recordings for the
# various alarm categories
for alarm in total_alarm_list:
cum_alarm_duration_alarms[alarm] = round(alarm_durations_alarms[alarm].sum(), 4)
cum_alarm_duration_alarms_24H = {} # dictionary containing the total duration of alarms in all recordings for the
# various alarm categories normalized for 24 hour recording periods
for alarm in total_alarm_list:
cum_alarm_duration_alarms_24H[alarm] = round(((cum_alarm_duration_alarms[alarm] /
(total_recording_time.total_seconds() / 86400))), 4)
# Dictionaries containing various descriptive statistics for each alarm category
mean_alarm_duration_alarms = {}
median_alarm_duration_alarms = {}
sd_alarm_duration_alarms = {}
mad_alarm_duration_alarms = {}
min_alarm_duration_alarms = {}
pc25_alarm_duration_alarms = {}
pc75_alarm_duration_alarms = {}
max_alarm_duration_alarms = {}
for alarm in total_alarm_list:
mean_alarm_duration_alarms[alarm] = round(alarm_durations_alarms[alarm].mean(), 4)
median_alarm_duration_alarms[alarm] = round(alarm_durations_alarms[alarm].median(), 4)
sd_alarm_duration_alarms[alarm] = round(alarm_durations_alarms[alarm].std(), 4)
mad_alarm_duration_alarms[alarm] = round(alarm_durations_alarms[alarm].mad(), 4)
min_alarm_duration_alarms[alarm] = round(alarm_durations_alarms[alarm].min(), 4)
pc25_alarm_duration_alarms[alarm] = round(alarm_durations_alarms[alarm].quantile(0.25), 4)
pc75_alarm_duration_alarms[alarm] = round(alarm_durations_alarms[alarm].quantile(0.75), 4)
max_alarm_duration_alarms[alarm] = round(alarm_durations_alarms[alarm].max(), 4)
# Create DataFrame containing cumulative alarm statistics for each alarm
alarm_stats_cum_al = DataFrame([total_alarm_number_alarms,
total_alarm_number_alarms_24H,
cum_alarm_duration_alarms,
cum_alarm_duration_alarms_24H,
mean_alarm_duration_alarms,
median_alarm_duration_alarms,
sd_alarm_duration_alarms,
mad_alarm_duration_alarms,
min_alarm_duration_alarms,
pc25_alarm_duration_alarms,
pc75_alarm_duration_alarms,
max_alarm_duration_alarms],
index = ['count', 'count per 24h', 'total alarm duration (sec)', 'total alarm duration per 24 hours (sec)',
'mean duration (sec)', 'median duration (sec)', 'sd duration (sec)', 'mad duration (sec)',
'min duration (sec)', '25th cent duration (sec)', '75th cent duration (sec)',
'max duration (sec)'])
# Dataframe containing cumulative alarm statistics for each alarm
alarm_stats_cum_al.round(2)
# Write Dataframe containing cumulative alarm statistics for each alarm to Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'alarm_stats_cum_al.xlsx'))
alarm_stats_cum_al.round(2).to_excel(writer, 'cumulative_stats')
writer.save()
# Shorten an overly long alarm name
total_alarm_list[0] = 'A setting, alarm limit or vent...'
# Total number of alarm events in all recordings
fig = plt.figure()
fig.set_size_inches(17, 12)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(total_alarm_list)+1)), alarm_stats_cum_al.loc['count', :], color = 'blue')
plt.ylabel("Alarms", fontsize = 22)
plt.xlabel("", fontsize = 22)
plt.title("Number of alarm events" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(total_alarm_list)], total_alarm_list, rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'number_events_al.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Total number of alarm events in all recordings normalized for 24 hour periods
fig = plt.figure()
fig.set_size_inches(17, 12)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(total_alarm_list)+1)), alarm_stats_cum_al.loc['count per 24h', :], color = 'blue')
plt.ylabel("Alarms", fontsize = 22)
plt.xlabel("", fontsize = 22)
plt.title("Number of alarm events per 24 hours" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(total_alarm_list)], total_alarm_list, rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'number_events_24H_al.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Median duration of alarm events in all recordings
fig = plt.figure()
fig.set_size_inches(17, 12)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(total_alarm_list)+1)), alarm_stats_cum_al.loc['median duration (sec)', :], color = 'blue')
plt.ylabel("Alarms", fontsize = 22)
plt.xlabel("seconds", fontsize = 22)
plt.title("Median duration of alarm events" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(total_alarm_list)], total_alarm_list, rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'median_events_al.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
all_durations = [] # Series containing durations of all alarm events in all the recordings
for recording in recordings:
for alarm in alarm_list[recording]:
all_durations.append(alarm_events[recording][alarm]['duration_seconds'])
all_durations = pd.concat(all_durations)
# The total number of alarm events in all the recordings
total_count = len(all_durations)
total_count
# The total number of alarm events in all the recordings per 24 hour
total_count_24H = total_count / (total_recording_time.total_seconds() / 86400)
total_count_24H
# Calculate descriptive statistics (expressed in seconds)
mean_duration_total = round(all_durations.mean(), 4)
median_duration_total = round(all_durations.median(), 4)
sd_duration_total = round(all_durations.std(), 4)
mad_duration_total = round(all_durations.mad(), 4)
min_duration_total = round(all_durations.min(), 4)
pc25_duration_total = round(all_durations.quantile(0.25), 4)
pc75_duration_total = round(all_durations.quantile(0.75), 4)
max_duration_total = round(all_durations.max(), 4)
alarm_stats_cum_total = DataFrame([ total_count, total_count_24H,
mean_duration_total, median_duration_total,
sd_duration_total, mad_duration_total, min_duration_total,
pc25_duration_total, pc75_duration_total, max_duration_total],
columns = ['all alarms in all recordings'],
index = ['total alarm events', 'total alarm events per 24 hours',
'mean alarm duration (sec)', 'median alarm duration (sec)',
'sd alarm duration (sec)', 'mad alarm duration (sec)',
'min alarm duration (sec)', '25 centile alarm duration (sec)',
'75 centile alarm duration (sec)', 'max alarm duration (sec)'])
# Cumulative statistics of the whole dataset
alarm_stats_cum_total.round(2)
# Write cumulative statistics to Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'alarm_stats_cum_total.xlsx'))
alarm_stats_cum_total.to_excel(writer, 'cumulative_stats')
writer.save()
# Histogram showing the number of alarms which were shorter than 1 minute
fig = plt.figure()
fig.set_size_inches(12, 6)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1)
n, bins, patches = plt.hist(all_durations, bins = range(0, 60))
plt.grid(True)
plt.xlabel('Alarm duration (seconds)', fontsize = 20)
plt.ylabel('Number of events', fontsize = 20)
plt.xticks(range(0,60,2), fontsize = 10)
plt.yticks(fontsize = 10)
plt.title('Histogram of alarm durations', fontsize = 20)
fig.savefig('%s/%s' % (DIR_WRITE, 'alarm_duration_hist_1.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Histogram showing the number of alarms which were shorter than 10 minutes
fig = plt.figure()
fig.set_size_inches(12, 6)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1)
n, bins, patches = plt.hist(all_durations, bins = range(0, 600))
plt.grid(True)
plt.xlabel('Alarm duration (seconds)', fontsize = 20)
plt.ylabel('Number of events', fontsize = 20)
plt.xticks(range(0, 600, 60), fontsize = 10)
plt.yticks(fontsize = 10)
plt.title('Histogram of alarm durations', fontsize = 20)
fig.savefig('%s/%s' % (DIR_WRITE, 'alarm_duration_hist_2.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Histogram showing all data with a bin size of 1 minute and logarithmic axes
fig = plt.figure()
fig.set_size_inches(12, 6)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1)
n, bins, patches = plt.hist(all_durations, bins = range(0, 50000, 60))
plt.grid(True)
plt.xlabel('Alarm duration (seconds)', fontsize = 20)
plt.ylabel('Number of events', fontsize = 20)
plt.xticks(range(0, 50000, 600), fontsize = 10)
plt.yticks(fontsize = 10)
plt.xscale('log')
plt.yscale('log')
plt.title('Histogram of alarm durations', fontsize = 20)
fig.savefig('%s/%s' % (DIR_WRITE, 'alarm_duration_hist_3.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
under_10_sec = sorted([al for al in all_durations if al < 10])
len(under_10_sec)
under_1_min = sorted([al for al in all_durations if al <= 60])
len(under_1_min)
under_10_sec_MV_low = sorted([al for al in alarm_durations_alarms['Minute volume < low limit'] if al < 10])
under_10_sec_MV_high = sorted([al for al in alarm_durations_alarms['Minute volume > high limit'] if al < 10])
under_10_sec_RR_high = sorted([al for al in alarm_durations_alarms['Respiratory rate > high limit'] if al < 10])
len(under_10_sec_MV_low), len(under_10_sec_MV_high), len(under_10_sec_RR_high)
# Short alarms (<10 sec) in the categories where the user sets the limits
len(under_10_sec_MV_low) + len(under_10_sec_MV_high) + len(under_10_sec_RR_high)
# How many alarm events are longer than 1 hour?
over_1_hour = sorted([al for al in all_durations if al > 3600])
len(over_1_hour)
# Which alarms were longer than one hour?
alarms_over_1_hour = []
for recording in recordings:
for alarm in alarm_list[recording]:
for event in alarm_events[recording][alarm]['duration_seconds']:
if event > 3600:
alarms_over_1_hour.append((recording, alarm, event))
alarms_over_1_hour = DataFrame(sorted(alarms_over_1_hour, key = lambda x: x[2], reverse = True),
columns = ['recording', 'alarm', 'duration (seconds)'])
alarms_over_1_hour
alarms_over_1_hour.groupby('alarm').count()
over_10_minutes = sorted([al for al in all_durations if al > 600 and al <= 3600])
len(over_10_minutes)
alarms_over_10_min = []
# which alarms were longer than 10 minutes but shorter than 1 hour
for recording in recordings:
for alarm in alarm_list[recording]:
for event in alarm_events[recording][alarm]['duration_seconds']:
if event > 600 and event <= 3600:
alarms_over_10_min.append((recording, alarm, event))
alarms_over_10_min = DataFrame(sorted(alarms_over_10_min, key = lambda x: x[2], reverse = True),
columns = ['recording', 'alarm', 'duration (seconds)'])
alarms_over_10_min.groupby('alarm').count()
over_1_minutes = sorted([al for al in all_durations if al > 60 and al <= 600])
len(over_1_minutes)
alarms_over_1_min = []
# which alarms were longer than 1 minutes but shorter than 10 minutes
for recording in recordings:
for alarm in alarm_list[recording]:
for event in alarm_events[recording][alarm]['duration_seconds']:
if event > 60 and event <= 600:
alarms_over_1_min.append((recording, alarm, event))
alarms_over_1_min = DataFrame(sorted(alarms_over_1_min, key = lambda x: x[2], reverse = True),
columns = ['recording', 'alarm', 'duration (seconds)'])
alarms_over_1_min.groupby('alarm').count()
# Write long alarms into a multisheet Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'long_alarms.xlsx'))
alarms_over_1_hour.to_excel(writer, 'over_1hour')
alarms_over_10_min.to_excel(writer, '10min_to_1hour')
alarms_over_1_min.to_excel(writer, '1min_to_10min')
writer.save()
# Identify the most frequent alarm events
frequent_alarms = alarm_stats_cum_al.loc['count'].sort_values(inplace = False, ascending = False)
# The eight most frequent alarms
frequent_alarms[:8]
# What percentage of all alarms did these 8 frequent alarms account for?
round(frequent_alarms[:8].sum() / frequent_alarms.sum(), 3) * 100
# Write frequent alarm in an Excel file
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'frequent_alarms.xlsx'))
DataFrame(frequent_alarms[:8]).to_excel(writer, 'frequent_alarms')
writer.save()
# Number of alarms where the user sets the limits
user_set_alarms = (frequent_alarms['Minute volume < low limit'] + frequent_alarms['Minute volume > high limit'] +
frequent_alarms['Respiratory rate > high limit'])
int(user_set_alarms)
# What proportion of all alarms were these 3 user-set alarms?
print('%.3f' % (user_set_alarms / frequent_alarms.sum()))
# Frequent alarms related to VT not achieved
other_frequent_alarms = (frequent_alarms['Tidal volume < low Limit'] + frequent_alarms['Volume not constant'] +
frequent_alarms['Tube obstructed'])
int(other_frequent_alarms)
# What proportion of all alarms were alarms related to VT not achieved?
print('%.3f' % (other_frequent_alarms / frequent_alarms.sum()))
MV_low_count = {}
for recording in recordings:
try:
MV_low_count[recording] = alarm_stats[recording]['Minute volume < low limit']['number of events'].iloc[0]
except KeyError:
# print('No "MV_low" alarm in recording %s' % recording)
pass
MV_low_count_24H = {}
for recording in recordings:
try:
MV_low_count_24H[recording] = \
alarm_stats[recording]['Minute volume < low limit']['number of event per 24h'].iloc[0]
except KeyError:
# print('No "MV_low" alarm in recording %s' % recording)
pass
MV_high_count = {}
for recording in recordings:
try:
MV_high_count[recording] = alarm_stats[recording]['Minute volume > high limit']['number of events'].iloc[0]
except KeyError:
# print('No "MV_high" alarm in recording %s' % recording)
pass
MV_high_count_24H = {}
for recording in recordings:
try:
MV_high_count_24H[recording] = alarm_stats[recording]['Minute volume > high limit']['number of event per 24h'].iloc[0]
except KeyError:
# print('No "MV_high" alarm in recording %s' % recording)
pass
RR_high_count = {}
for recording in recordings:
try:
RR_high_count[recording] = alarm_stats[recording]['Respiratory rate > high limit']['number of events'].iloc[0]
except KeyError:
# print('No "RR_high" alarm in recording %s' % recording)
pass
RR_high_count_24H = {}
for recording in recordings:
try:
RR_high_count_24H[recording] = alarm_stats[recording]['Respiratory rate > high limit']['number of event per 24h'].iloc[0]
except KeyError:
# print('No "RR_high" alarm in recording %s' % recording)
pass
# Plot the number of MV < low limit alarm events and write graph to file
fig = plt.figure()
fig.set_size_inches(17, 12)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(MV_low_count)+1)), MV_low_count.values(), color = 'blue')
plt.ylabel("Recordings", fontsize = 16)
plt.xlabel("number of alarm events", fontsize = 16)
plt.title("MV < low limit" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(MV_low_count.keys())], MV_low_count.keys(),
rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'MV_low.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Plot the number of MV < low limit alarm events normalized for 24 hours and write graph to file
fig = plt.figure()
fig.set_size_inches(17, 12)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(MV_low_count_24H)+1)), MV_low_count_24H.values(), color = 'blue')
plt.ylabel("Recordings", fontsize = 16)
plt.xlabel("number of alarm events per 24 hours", fontsize = 16)
plt.title("MV < low limit" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(MV_low_count_24H.keys())], MV_low_count_24H.keys(),
rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'MV_low_24H.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Plot the number of MV > high limit alarm events and write graph to file
fig = plt.figure()
fig.set_size_inches(17, 12)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(MV_high_count)+1)), MV_high_count.values(), color = 'blue')
plt.ylabel("Recordings", fontsize = 16)
plt.xlabel("number of alarm events", fontsize = 16)
plt.title("MV > high limit" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(MV_high_count.keys())], MV_high_count.keys(),
rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'MV_high.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Plot the number of MV > high limit alarm events normalized for 24 hours and write graph to file
fig = plt.figure()
fig.set_size_inches(17, 12)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(MV_high_count_24H)+1)), MV_high_count_24H.values(), color = 'blue')
plt.ylabel("Recordings", fontsize = 16)
plt.xlabel("number of alarm events per 24 hours", fontsize = 16)
plt.title("MV > high limit" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(MV_high_count_24H.keys())], MV_high_count_24H.keys(),
rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'MV_high_24H.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Plot the number of RR > high limit alarm events and write graph to file
fig = plt.figure()
fig.set_size_inches(17, 12)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(RR_high_count)+1)), RR_high_count.values(), color = 'blue')
plt.ylabel("Recordings", fontsize = 16)
plt.xlabel("number of alarm events", fontsize = 16)
plt.title("RR > high limit" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(RR_high_count.keys())], RR_high_count.keys(),
rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'RR_high.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
# Plot the number of RR > high limit alarm events normalized for 24 hours and write graph to file
fig = plt.figure()
fig.set_size_inches(17, 12)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
ax1.barh(list(range(1, len(RR_high_count_24H)+1)), RR_high_count_24H.values(), color = 'blue')
plt.ylabel("Recordings", fontsize = 16)
plt.xlabel("number of alarm events per 24 hours", fontsize = 16)
plt.title("RR > high limit" , fontsize = 26)
ax1.tick_params(which = 'both', labelsize=14)
plt.yticks([i+1.5 for i, _ in enumerate(RR_high_count_24H.keys())],
RR_high_count_24H.keys(), rotation = 'horizontal')
plt.grid()
fig.savefig('%s/%s' % (DIR_WRITE, 'RR_high_24H.jpg'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
for recording in recordings:
slow_measurements[recording] = pd.concat([slow_measurements[recording],
vent_settings_2[recording], alarm_settings_2[recording]], axis = 0, join = 'outer')
slow_measurements[recording].sort_index(inplace = True)
for recording in recordings:
slow_measurements[recording] = slow_measurements[recording].fillna(method = 'pad')
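# Hedged sketch (added, not part of the original analysis): a minimal illustration of why the
# settings are forward-filled. Setting values only appear at the moment they were changed, so
# after the outer join with the 1/sec data they are NaN everywhere else and 'pad' carries the
# last set value forward in time.
_demo = pd.Series([np.nan, 4.0, np.nan, np.nan, 6.0, np.nan],
                  index = pd.date_range('2016-01-01', periods = 6, freq = 'S'))
_demo.fillna(method = 'pad') # -> NaN, 4.0, 4.0, 4.0, 6.0, 6.0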
def minute_volume_plotter(rec, ylim = False):
'''
    Plots the total minute volume (using the data obtained with 1/sec sampling rate)
together with the "MV low" and "MV high" alarm limits
Displays the plot
'''
if ylim:
ymax = ylim
else:
ymax = slow_measurements[rec]['MV_high_weight'].max() + 0.3
fig = plt.figure()
fig.set_size_inches(12, 8)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
slow_measurements[rec]['MV_kg'].plot(ax = ax1, color = 'blue', ylim = [0, ymax] );
slow_measurements[rec]['MV_low_weight'].plot(ax = ax1, color = 'green', linewidth = 3, ylim = [0, ymax] );
slow_measurements[rec]['MV_high_weight'].plot(ax = ax1, color = 'red', linewidth = 3, ylim = [0, ymax] );
ax1.set_title('Minute volume - %s' % rec, size = 22, color = 'black')
ax1.set_xlabel('Time', size = 22, color = 'black')
ax1.set_ylabel('L/kg/min', size = 22, color = 'black')
ax1.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax1.legend(['MV_kg', 'alarm_low', 'alarm_high']);
minute_volume_plotter('DG003')
def minute_volume_plotter_2(rec, ylim = False, version = ''):
'''
    Plots the total minute volume (using the data obtained with 1/sec sampling rate)
together with the "MV low" and "MV high" alarm limits
Writes the plot to file (does not display the plot)
'''
if ylim:
ymax = ylim
else:
        ymax = slow_measurements[rec]['MV_high_weight'].max() + 0.3
fig = plt.figure()
fig.set_size_inches(12, 8)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
slow_measurements[rec]['MV_kg'].plot(ax = ax1, color = 'blue', ylim = [0, ymax] );
    slow_measurements[rec]['MV_low_weight'].plot(ax = ax1, color = 'green', linewidth = 3, ylim = [0, ymax] );
    slow_measurements[rec]['MV_high_weight'].plot(ax = ax1, color = 'red', linewidth = 3, ylim = [0, ymax] );
ax1.set_title('Minute volume - %s' % rec, size = 22, color = 'black')
ax1.set_xlabel('Time', size = 22, color = 'black')
ax1.set_ylabel('L/kg/min', size = 22, color = 'black')
ax1.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax1.legend(['MV_kg', 'alarm_low', 'alarm_high']);
fig.savefig('%s/%s_%s%s.jpg' % (dir_write, 'minute_volume', rec, version), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
plt.close(fig)
def resp_rate_plotter(rec, ylim = False):
'''
    Plots the total respiratory rate (using the data obtained with 1/sec sampling rate)
together with the set backup rate and "RR high" alarm limits
Displays the plot
'''
if ylim:
ymax = ylim
else:
ymax = slow_measurements[rec]['5001|RR [1/min]'].max() + 10
fig = plt.figure()
fig.set_size_inches(12, 8)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
slow_measurements[rec]['5001|RR [1/min]'].plot(ax = ax1, color = 'blue', ylim = [0, ymax] );
slow_measurements[rec]['RR_high'].plot(ax = ax1, color = 'red', linewidth = 3, ylim = [0, ymax] );
slow_measurements[rec]['RR_set'].plot(ax = ax1, color = 'green', linewidth = 3, ylim = [0, ymax] );
ax1.set_title('Respiratory rate - %s' % rec, size = 22, color = 'black')
ax1.set_xlabel('Time', size = 22, color = 'black')
ax1.set_ylabel('1/min', size = 22, color = 'black')
ax1.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax1.legend(['RR', 'alarm_high', 'RR_set']);
resp_rate_plotter('DG003')
def resp_rate_plotter_2(rec, ylim = False, version = ''):
'''
    Plots the total respiratory rate (using the data obtained with 1/sec sampling rate)
together with the set backup rate and "RR high" alarm limits
Writes the plots to files (does not display the plot)
'''
if ylim:
ymax = ylim
else:
ymax = slow_measurements[rec]['5001|RR [1/min]'].max() + 10
fig = plt.figure()
fig.set_size_inches(12, 8)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
slow_measurements[rec]['5001|RR [1/min]'].plot(ax = ax1, color = 'blue', ylim = [0, ymax] );
slow_measurements[rec]['alarm_RR_high'].plot(ax = ax1, color = 'red', linewidth = 3, ylim = [0, ymax] );
slow_measurements[rec]['RR_set'].plot(ax = ax1, color = 'green', linewidth = 3, ylim = [0, ymax] );
ax1.set_title('Respiratory rate - %s' % rec, size = 22, color = 'black')
ax1.set_xlabel('Time', size = 22, color = 'black')
ax1.set_ylabel('1/min', size = 22, color = 'black')
ax1.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax1.legend(['RR', 'alarm_high', 'RR_set'])
fig.savefig('%s/%s_%s%s.jpg' % (dir_write, 'resp_rate', rec, version), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='jpg',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
plt.close(fig)
clinical_details_for_paper = clinical_details[['Gestation', 'Birth weight', 'Current weight', 'Main diagnoses']]
clinical_details_for_paper = clinical_details_for_paper.loc[recordings]
# clinical_details_for_paper
vent_modes_all = {}
for recording in recordings:
vent_modes_all[recording] = vent_modes_selected[recording].Text.unique()
vent_modes_all[recording] = [mode[5:] for mode in vent_modes_all[recording] if mode.startswith(' Mode')]
vent_modes_all = DataFrame([vent_modes_all]).T
vent_modes_all.columns = ['Ventilation modes']
vent_modes_all = vent_modes_all.loc[recordings]
# vent_modes_all
recording_duration_hours_all = DataFrame([recording_duration_hours]).T
recording_duration_hours_all.columns = ['Recording duration (hours)']
Table_1 = recording_duration_hours_all.join([clinical_details_for_paper, vent_modes_all])
Table_1
writer = pd.ExcelWriter('%s/%s' % (DIR_WRITE, 'Table_1.xlsx'))
Table_1.to_excel(writer)
writer.save()
rec = 'DG032_2'
filetype = 'jpg'
dpi = 300
alarm_state = alarm_states[rec]
numbered = Series(np.zeros(len(alarm_state)), index = alarm_state.index)
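# Mark each alarm-state change that becomes 'Active' with the (1-based) position of
# that alarm type in alarm_list[rec]; all other rows stay 0. The event raster plotted
# below then shows one horizontal lane per alarm type.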
for i in range(1, len(alarm_state)):
if alarm_state.iloc[i]['State New'] == 'Active':
numbered[i] = alarm_list[rec].index(alarm_state.iloc[i]['Id']) + 1
fig = plt.figure()
fig.set_size_inches(10, 4)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None, wspace=None, hspace=None)
ax1 = fig.add_subplot(1, 1, 1);
ax1.plot(alarm_state.index, numbered, '|', color = 'red', markersize = 14, markeredgewidth = 0.5 )
plt.xlabel("Time", fontsize = 14)
plt.title(rec)
plt.yticks([i+1 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 14);
plt.xticks(fontsize = 8)
plt.ylim(0.5, len(alarm_list[rec]) + 0.5)
fig.savefig('%s/%s.jpg' % (DIR_WRITE, 'Figure_1a'), dpi=dpi, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format= filetype,
transparent=False, bbox_inches=None, pad_inches=0.1, frameon=True)
rec = 'DG032_2'
filetype = 'jpg'
dpi = 300
fig = plt.figure()
fig.set_size_inches(8, 4)
fig.subplots_adjust(left=0.5, bottom=None, right=None, top=None, wspace=None, hspace= None)
ax1 = fig.add_subplot(1, 1, 1)
xs = [i + 0.1 for i, _ in enumerate(alarm_list[rec])]
stats = []
for alarm in alarm_list[rec]:
stats.append(alarm_stats[rec][alarm]['percentage of recording length (%)'])
stats_all = pd.concat(stats)
plt.barh(xs, stats_all, color = 'red')
plt.xlabel("% of total recording time", fontsize = 14)
plt.title(rec)
plt.yticks([i + 0.5 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 14)
plt.xticks(fontsize = 14);
fig.savefig('%s/%s.jpg' % (DIR_WRITE, 'Figure_1b'), dpi=dpi, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format= filetype,
transparent=False, bbox_inches=None, pad_inches=0.1, frameon=True)
rec = 'DG032_2'
filetype = 'tiff'
dpi = 300
alarm_state = alarm_states[rec]
numbered = Series(np.zeros(len(alarm_state)), index = alarm_state.index)
for i in range(1, len(alarm_state)):
if alarm_state.iloc[i]['State New'] == 'Active':
numbered[i] = alarm_list[rec].index(alarm_state.iloc[i]['Id']) + 1
fig = plt.figure()
fig.set_size_inches(9, 7)
fig.subplots_adjust(left=0.4, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
ax1 = fig.add_subplot(2, 1, 1);
ax1.plot(alarm_state.index, numbered, '|', color = 'red', markersize = 10, markeredgewidth = 0.5 )
plt.xlabel("Time", fontsize = 12)
plt.title(rec)
plt.yticks([i+1 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 12);
plt.xticks(fontsize = 8)
plt.ylim(0.5, len(alarm_list[rec]) + 0.5)
ax1 = fig.add_subplot(2, 1, 2)
xs = [i + 0.1 for i, _ in enumerate(alarm_list[rec])]
stats = []
for alarm in alarm_list[rec]:
stats.append(alarm_stats[rec][alarm]['percentage of recording length (%)'])
stats_all = pd.concat(stats)
plt.barh(xs, stats_all, color = 'red')
plt.xlabel("% of total recording time", fontsize = 12)
plt.title(rec)
plt.yticks([i + 0.5 for i, _ in enumerate(alarm_list[rec])], alarm_list[rec], fontsize = 12)
plt.xticks(fontsize = 8);
fig.savefig('%s/%s.tiff' % (DIR_WRITE, 'Figure_1'), dpi=dpi, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format= filetype,
transparent=False, bbox_inches=None, pad_inches=0.1, frameon=True)
rec = 'DG003'
filetype = 'jpg'
dpi = 300
ymax = slow_measurements[rec]['MV_high_weight'].max() + 0.3
fig = plt.figure()
fig.set_size_inches(8, 6)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
ax1 = fig.add_subplot(1, 1, 1);
slow_measurements[rec]['MV_kg'].plot(ax = ax1, color = 'blue', ylim = [0, ymax] );
slow_measurements[rec]['MV_low_weight'].plot(ax = ax1, color = 'green', linewidth = 3, ylim = [0, ymax] );
slow_measurements[rec]['MV_high_weight'].plot(ax = ax1, color = 'red', linewidth = 3, ylim = [0, ymax] );
ax1.set_title(rec, size = 14, color = 'black')
ax1.set_xlabel('Time', size = 14, color = 'black')
ax1.set_ylabel('L/min/kg', size = 14, color = 'black')
ax1.tick_params(which = 'both', labelsize=12)
ax1.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax1.legend(['MV_kg', 'alarm_low', 'alarm_high']);
fig.savefig('%s/%s.jpg' % (DIR_WRITE, 'Figure_2a_color'), dpi=dpi, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format= filetype,
transparent=False, bbox_inches=None, pad_inches=0.1, frameon=True)
rec = 'DG003'
filetype = 'jpg'
dpi = 300
ymax = slow_measurements[rec]['MV_high_weight'].max() + 0.3
fig = plt.figure()
fig.set_size_inches(8, 6)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
ax1 = fig.add_subplot(1, 1, 1);
slow_measurements[rec]['MV_kg'].plot(ax = ax1, color = 'black', alpha = 0.6, ylim = [0, ymax] );
slow_measurements[rec]['MV_low_weight'].plot(ax = ax1, color = 'black', linewidth = 3, ylim = [0, ymax] );
slow_measurements[rec]['MV_high_weight'].plot(ax = ax1, color = 'black', linewidth = 3, ylim = [0, ymax] );
ax1.set_title(rec, size = 14, color = 'black')
ax1.set_xlabel('Time', size = 14, color = 'black')
ax1.set_ylabel('L/min/kg', size = 14, color = 'black')
ax1.tick_params(which = 'both', labelsize=12)
ax1.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax1.legend(['MV_kg', 'alarm_low', 'alarm_high']);
fig.savefig('%s/%s.jpg' % (DIR_WRITE, 'Figure_2a_bw'), dpi=dpi, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format= filetype,
transparent=False, bbox_inches=None, pad_inches=0.1, frameon=True)
rec = 'DG041'
filetype = 'jpg'
dpi = 300
ymax = slow_measurements[rec]['5001|RR [1/min]'].max() + 15
fig = plt.figure()
fig.set_size_inches(8, 6)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
slow_measurements[rec]['5001|RR [1/min]'].plot(ax = ax1, color = 'blue', ylim = [0, ymax] );
slow_measurements[rec]['RR_high'].plot(ax = ax1, color = 'red', linewidth = 3, ylim = [0, ymax] );
slow_measurements[rec]['RR_set'].plot(ax = ax1, color = 'green', linewidth = 3, ylim = [0, ymax] );
ax1.set_title(rec, size = 14, color = 'black')
ax1.set_xlabel('Time', size = 14, color = 'black')
ax1.set_ylabel('1/min', size = 14, color = 'black')
ax1.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax1.legend(['RR', 'alarm_high', 'RR_set'])
fig.savefig('%s/%s.jpg' % (DIR_WRITE, 'Figure_2b_color'), dpi=dpi, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format= filetype,
transparent=False, bbox_inches=None, pad_inches=0.1, frameon=True)
rec = 'DG041'
filetype = 'jpg'
dpi = 300
ymax = slow_measurements[rec]['5001|RR [1/min]'].max() + 15
fig = plt.figure()
fig.set_size_inches(8, 6)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=0.7)
ax1 = fig.add_subplot(1, 1, 1);
slow_measurements[rec]['5001|RR [1/min]'].plot(ax = ax1, color = 'black', alpha = 0.6, ylim = [0, ymax] );
slow_measurements[rec]['RR_high'].plot(ax = ax1, color = 'black', linewidth = 3, ylim = [0, ymax] );
slow_measurements[rec]['RR_set'].plot(ax = ax1, color = 'black', linewidth = 3, ylim = [0, ymax] );
ax1.set_title(rec, size = 14, color = 'black')
ax1.set_xlabel('Time', size = 14, color = 'black')
ax1.set_ylabel('1/min', size = 14, color = 'black')
ax1.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax1.legend(['RR', 'alarm_high', 'RR_set'])
fig.savefig('%s/%s.jpg' % (DIR_WRITE, 'Figure_2b_bw'), dpi=dpi, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format= filetype,
transparent=False, bbox_inches=None, pad_inches=0.1, frameon=True)
rec0 = 'DG003'
rec1 = 'DG041'
filetype = 'tiff'
dpi = 300
ymax0 = slow_measurements[rec0]['MV_high_weight'].max() + 0.3
ymax1 = slow_measurements[rec1]['5001|RR [1/min]'].max() + 15
fig = plt.figure()
fig.set_size_inches(6, 9)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
ax0 = fig.add_subplot(2, 1, 1);
slow_measurements[rec0]['MV_kg'].plot(ax = ax0, color = 'blue', ylim = [0, ymax0] );
slow_measurements[rec0]['MV_low_weight'].plot(ax = ax0, color = 'green', linewidth = 3, ylim = [0, ymax0] );
slow_measurements[rec0]['MV_high_weight'].plot(ax = ax0, color = 'red', linewidth = 3, ylim = [0, ymax0] );
ax0.set_title(rec0, size = 12, color = 'black')
ax0.set_xlabel('', size = 12, color = 'black')
ax0.set_ylabel('L/min/kg', size = 12, color = 'black')
ax0.tick_params(which = 'both', labelsize=10)
ax0.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax0.legend(['MV_kg', 'alarm_low', 'alarm_high']);
ax1 = fig.add_subplot(2, 1, 2);
slow_measurements[rec1]['5001|RR [1/min]'].plot(ax = ax1, color = 'blue', ylim = [0, ymax1] );
slow_measurements[rec1]['RR_high'].plot(ax = ax1, color = 'red', linewidth = 3, ylim = [0, ymax1] );
slow_measurements[rec1]['RR_set'].plot(ax = ax1, color = 'green', linewidth = 3, ylim = [0, ymax1] );
ax1.set_title(rec1, size = 12, color = 'black')
ax1.set_xlabel('Time', size = 12, color = 'black')
ax1.set_ylabel('1/min', size = 12, color = 'black')
ax1.tick_params(which = 'both', labelsize=10)
ax1.grid('on', linestyle='-', linewidth=0.5, color = 'gray')
ax1.legend(['RR', 'alarm_high', 'RR_set'], loc = 4)
fig.savefig('%s/%s.tiff' % (DIR_WRITE, 'Figure_2'), dpi=dpi, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format= filetype,
transparent=False, bbox_inches=None, pad_inches=0.1, frameon=True)
# Histogram showing the number of alarms which were shorter than 1 minute
fig = plt.figure()
fig.set_size_inches(7, 5)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
ax1 = fig.add_subplot(1, 1, 1)
n, bins, patches = plt.hist(all_durations, bins = range(0, 60))
plt.grid(True)
plt.xlabel('Alarm duration (seconds)', fontsize = 12)
plt.ylabel('Number of alarm events', fontsize = 12)
plt.xticks(range(0,60,4), fontsize = 12)
plt.yticks(fontsize = 12)
plt.title('Histogram of alarm durations', fontsize = 12)
fig.savefig('%s/%s' % (DIR_WRITE, 'Figure_3.tiff'), dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format='tiff',
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import modules containing own functions
Step2: List and set the working directory and the directories to write out data
Step3: List of the recordings
Step4: Import clinical details
Step5: Import ventilator parameters retrieved with 1/sec frequency
Step6: Calculating parameters / body weight kg
Step7: Resampling to remove half-empty rows
Step8: Save processed slow_measurements DataFrames to pickle archive
Step9: Import processed 'slow_measurements' data from pickle archive
Step10: Visualising recording durations
Step11: Write recording times out into files
Step12: Import ventilator modes and settings
Step13: Import ventilation modes
Step14: Save ventilation modes and settings into Excel files
Step15: Import alarm settings
Step16: Import alarm states
Step17: Calculate the total and average time of all recordings
Step18: Generate alarm events from alarm states
Step19: Using the files containing the alarm states, for each alarm category in each recording create a DataFrame with the timestamps at which the alarm went off and the duration of each alarm event, and store these in a dictionary of dictionaries (see the sketch after this list)
Step20: Calculate descriptive statistics for each alarm in each recording and write them to file
Step21: Visualise alarm statistics for the individual alarms in the individual recording
Step22: Example plots
Step23: Write all graphs to files
Step24: In each recording, what was the mean, median, sd, mad, min, 25pc, 75pc, max of alarm durations
Step25: Visualize cumulative statistics of recordings
Step26: Generate cumulative statistics of each alarm in all recordings combined
Step27: For each alarm, what was the number of alarm events across all recordings and the number of events normalized per 24 hour recording time
Step28: For each alarm, what was the total duration of alarm events across all recordings and normalized per 24 hour recording time
Step29: For each alarm what was the mean, median, sd, mad, min, 25pc, 75pc, max of alarm durations
Step30: Visualising cumulative statistics of alarms
Step31: Calculate cumulative descriptive statistics of all alarms in all recording together
Step32: Visualise the duration of all alarm events as histogram
Step33: How many short alarms did occur?
Step34: Check which are the longest alarms
Step35: How many alarm events are longer than 10 minutes but shorter than 1 hour?
Step36: How many alarm events are longer than 1 minute?
Step37: Check which are the most frequent alarms
Step38: Visualise MV and RR limit alarms
Step39: Investigate the relationship of MV and RR parameter readings, ventilation settings and alarm settings
Step40: Create the tables and figures of the paper
Step41: Figure 1
Step42: Figure 2
Step43: Figure 3
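Step 19 and Step 24 refer to code that is not included in the excerpt above, so the following is only a minimal, hypothetical sketch of how a dictionary of per-alarm event DataFrames could be built from the alarm-state tables. It assumes nothing beyond the column names visible in the plotting code ('Id', and 'State New' with an 'Active' value); the container name alarm_events and the pairing logic are illustrative, not taken from the source notebook.
import pandas as pd
alarm_events = {}  # {recording: {alarm_id: DataFrame of onset time and duration}}
for recording in recordings:
    states = alarm_states[recording]
    alarm_events[recording] = {}
    for alarm_id in states['Id'].unique():
        rows = states[states['Id'] == alarm_id]
        onsets, durations = [], []
        active_since = None
        for timestamp, row in rows.iterrows():
            if row['State New'] == 'Active':
                active_since = timestamp
            elif active_since is not None:
                onsets.append(active_since)
                durations.append((timestamp - active_since).total_seconds())
                active_since = None
        alarm_events[recording][alarm_id] = pd.DataFrame(
            {'onset': onsets, 'duration_sec': durations})
The per-alarm descriptive statistics of Step 24 would then follow from something like alarm_events[recording][alarm_id]['duration_sec'].describe().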
|
7,221
|
<ASSISTANT_TASK:>
Python Code:
# BNF grammar of str.format() replacement fields, from the Python documentation,
# kept inside a string literal so that this cell runs without a SyntaxError.
"""
replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"
field_name ::= arg_name ("." attribute_name | "[" element_index "]")*
arg_name ::= [identifier | integer]
attribute_name ::= identifier
element_index ::= integer | index_string
index_string ::= <any source character except "]"> +
conversion ::= "r" | "s" | "a"
format_spec ::= <described in the next section>
"""
pass # Do not output
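# Added for clarity (not part of the original notebook): one concrete replacement
# field mapped onto the grammar above.
#   "{0.real:>+10.2f}"
#     field_name  = "0.real"   (arg_name "0" followed by "." attribute_name "real")
#     format_spec = ">+10.2f"  (align ">", sign "+", width 10, precision .2, type "f")
print("{0.real:>+10.2f}".format(3.14159))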
print("{} {}".format("Hello", "World"))
# is equal to...
print("{0} {1}".format("Hello", "World"))
print("{hello} {world}".format(hello="Hello", world="World"))
print("{0}{1}{0}".format("H", "e"))
print("{lang}.{suffix}".format(**{"lang": "Python", "suffix": "py"}))
print("{} {}".format(*["Python", "Rocks"]))
data = {'name': 'Python', 'score': 100}
print("Name: {0[name]}, Score: {0[score]}".format(data)) # 不需要引号
langs = ["Python", "Ruby"]
print("{0[0]} vs {0[1]}".format(langs))
print("\n====\nHelp(format):\n {.__doc__}".format(str.format))
for align, text in zip("<^>", ["left", "center", "right"]):
print("{:{fill}{align}16}".format(text, fill=align, align=align))
print("{:0=10}".format(100)) # = 只允许数字
print("{0:+}\n{1:-}\n{0: }".format(3.14, -3.14))
print("Binary: {0:b} => {0:#b}".format(3))
print("Large Number: {0:} => {0:,}".format(1.25e6))
print("Padding: {0:16} => {0:016}".format(3))
from math import pi
print("pi = {pi:.2}, also = {pi:.7}".format(pi=pi))
for t in "b c d #o #x #X n".split():
print("Type {0:>2} of {1} shows: {1:{t}}".format(t, 97, t=t))
for t, n in zip("eEfFgGn%", [12345, 12345, 1.3, 1.3, 1, 2, 3.14, 0.985]):
print("Type {} shows: {:.2{t}}".format(t, n, t=t))
try:
print("{:s}".format(123))
except:
print("{}".format(456))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Formatting Python strings
Step2: I have converted the grammar above into a railroad-diagram style, which may be more intuitive:
Step3: Besides this, as mentioned in 0x05 Function Arguments and Unpacking, unpacking can also be used directly inside format():
Step4: Within the template you can also access an argument's attributes or items via .identifier and [key] (note that "{}{}" is equivalent to "{0}{1}"):
Step5: Forced conversion
Step6: You can also see that {} may be nested inside the format spec, but it must be supplied as a keyword argument and only one level of nesting is allowed.
Step7: # indicates whether numbers in special formats (binary, hexadecimal, etc.) need a prefix symbol; , indicates whether a thousands separator is needed when formatting numbers; 0 is equivalent to the earlier {
Step8: The last two parts are the familiar decimal precision .n and the presentation type; only a few examples are given here, see the documentation for details:
Step9: Integer
Step10: Float
Step11: String (default)
|
7,222
|
<ASSISTANT_TASK:>
Python Code:
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
!pip3 install {USER_FLAG} --upgrade pip
!pip3 install {USER_FLAG} --upgrade scikit-learn
!pip3 install {USER_FLAG} --upgrade google-cloud-aiplatform tensorboard-plugin-profile
!pip3 install {USER_FLAG} --upgrade tensorflow
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import os
PROJECT_ID = ""
# Get your Google Cloud project ID and project number from gcloud
if not os.getenv("IS_TESTING"):
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
shell_output = !gcloud projects list --filter="$(gcloud config get-value project)" --format="value(PROJECT_NUMBER)" 2>/dev/null
PROJECT_NUMBER = shell_output[0]
print("Project number: ", PROJECT_NUMBER)
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"}
if (
SERVICE_ACCOUNT == ""
or SERVICE_ACCOUNT is None
or SERVICE_ACCOUNT == "[your-service-account]"
):
# Get your GCP project id from gcloud
shell_output = !gcloud auth list 2>/dev/null
SERVICE_ACCOUNT = shell_output[2].split()[1]
print("Service Account:", SERVICE_ACCOUNT)
!gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME
!gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME
SOURCE_DATA_PATH = "{}/swivel".format(BUCKET_NAME)
PIPELINE_ROOT = "{}/pipeline_root".format(BUCKET_NAME)
import pandas as pd
import tensorflow as tf
from google.cloud import aiplatform
from sklearn.metrics.pairwise import cosine_similarity
!gsutil cp gs://cloud-samples-data/vertex-ai/matching-engine/swivel/pipeline/* .
YOUR_PIPELINE_SUFFIX = "swivel-pipeline-movie" # @param {type:"string"}
MACHINE_TYPE = "n1-standard-16" # @param {type:"string"}
ACCELERATOR_COUNT = 2 # @param {type:"integer"}
ACCELERATOR_TYPE = "NVIDIA_TESLA_V100" # @param {type:"string"}
BUCKET = BUCKET_NAME[5:] # remove "gs://" for the following command.
!chmod +x swivel_template_configuration*
!./swivel_template_configuration_basic.sh -pipeline_suffix {YOUR_PIPELINE_SUFFIX} -project_number {PROJECT_NUMBER} -project_id {PROJECT_ID} -machine_type {MACHINE_TYPE} -accelerator_count {ACCELERATOR_COUNT} -accelerator_type {ACCELERATOR_TYPE} -pipeline_root {BUCKET}
!./swivel_template_configuration.sh -pipeline_suffix {YOUR_PIPELINE_SUFFIX} -project_number {PROJECT_NUMBER} -project_id {PROJECT_ID} -machine_type {MACHINE_TYPE} -accelerator_count {ACCELERATOR_COUNT} -accelerator_type {ACCELERATOR_TYPE} -pipeline_root {BUCKET}
# Copy the MovieLens sample dataset
! gsutil cp -r gs://cloud-samples-data/vertex-ai/matching-engine/swivel/movielens_25m/train/* {SOURCE_DATA_PATH}/movielens_25m
# MovieLens items embedding sample
PARAMETER_VALUES = {
"embedding_dim": 100, # <---CHANGE THIS (OPTIONAL)
"input_base": "{}/movielens_25m/train".format(SOURCE_DATA_PATH),
"input_type": "items", # For movielens sample
"max_vocab_size": 409600, # <---CHANGE THIS (OPTIONAL)
"num_epochs": 5, # <---CHANGE THIS (OPTIONAL)
}
# Instantiate PipelineJob object
pl = aiplatform.PipelineJob(
display_name=YOUR_PIPELINE_SUFFIX,
# Whether or not to enable caching
# True = always cache pipeline step result
# False = never cache pipeline step result
# None = defer to cache option for each pipeline component in the pipeline definition
enable_caching=False,
# Local or GCS path to a compiled pipeline definition
template_path="swivel_pipeline_basic.json",
# Dictionary containing input parameters for your pipeline
parameter_values=PARAMETER_VALUES,
# GCS path to act as the pipeline root
pipeline_root=PIPELINE_ROOT,
)
# Submit the Pipeline to Vertex AI
# Optionally you may specify the service account below: submit(service_account=SERVICE_ACCOUNT)
# You must have iam.serviceAccounts.actAs permission on the service account to use it
pl.submit()
! gsutil -m cp -r gs://cloud-samples-data/vertex-ai/matching-engine/swivel/models/movielens/model {SOURCE_DATA_PATH}/movielens_model
SAVEDMODEL_DIR = os.path.join(SOURCE_DATA_PATH, "movielens_model/model")
LOGS_DIR = os.path.join(SOURCE_DATA_PATH, "movielens_model/tensorboard")
# If on Google Cloud Notebooks, then don't execute this code.
if not IS_GOOGLE_CLOUD_NOTEBOOK:
if "google.colab" in sys.modules:
# Load the TensorBoard notebook extension.
%load_ext tensorboard
# If on Google Cloud Notebooks, then don't execute this code.
if not IS_GOOGLE_CLOUD_NOTEBOOK:
if "google.colab" in sys.modules:
%tensorboard --logdir $LOGS_DIR
ENDPOINT_NAME = "swivel_embedding" # <---CHANGE THIS (OPTIONAL)
MODEL_VERSION_NAME = "movie-tf2-cpu-2.4" # <---CHANGE THIS (OPTIONAL)
aiplatform.init(project=PROJECT_ID, location=REGION)
# Create a model endpoint
endpoint = aiplatform.Endpoint.create(display_name=ENDPOINT_NAME)
# Upload the trained model to Model resource
model = aiplatform.Model.upload(
display_name=MODEL_VERSION_NAME,
artifact_uri=SAVEDMODEL_DIR,
serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-4:latest",
)
# Deploy the Model to the Endpoint
model.deploy(
endpoint=endpoint,
machine_type="n1-standard-2",
)
!gsutil cp gs://cloud-samples-data/vertex-ai/matching-engine/swivel/movielens_25m/movies.csv ./movies.csv
movies = pd.read_csv("movies.csv")
print(f"Movie count: {len(movies.index)}")
movies.head()
# Change to your favourite movies.
query_movies = [
"Lion King, The (1994)",
"Aladdin (1992)",
"Star Wars: Episode IV - A New Hope (1977)",
"Star Wars: Episode VI - Return of the Jedi (1983)",
"Terminator 2: Judgment Day (1991)",
"Aliens (1986)",
"Godfather, The (1972)",
"Goodfellas (1990)",
]
def get_movie_id(title):
return list(movies[movies.title == title].movieId)[0]
input_items = [str(get_movie_id(title)) for title in query_movies]
predictions = endpoint.predict(instances=input_items)
embeddings = predictions.predictions
print(len(embeddings))
for idx1 in range(0, len(input_items) - 1, 2):
item1 = input_items[idx1]
title1 = query_movies[idx1]
print(title1)
print("==================")
embedding1 = embeddings[idx1]
for idx2 in range(0, len(input_items)):
item2 = input_items[idx2]
embedding2 = embeddings[idx2]
similarity = round(cosine_similarity([embedding1], [embedding2])[0][0], 5)
title1 = query_movies[idx1]
title2 = query_movies[idx2]
print(f" - Similarity to '{title2}' = {similarity}")
print()
# Copy the wikipedia sample dataset
! gsutil -m cp -r gs://cloud-samples-data/vertex-ai/matching-engine/swivel/wikipedia/* {SOURCE_DATA_PATH}/wikipedia
YOUR_PIPELINE_SUFFIX = "my-first-pipeline-wiki" # @param {type:"string"}
!./swivel_template_configuration.sh -pipeline_suffix {YOUR_PIPELINE_SUFFIX} -project_id {PROJECT_ID} -machine_type {MACHINE_TYPE} -accelerator_count {ACCELERATOR_COUNT} -accelerator_type {ACCELERATOR_TYPE} -pipeline_root {BUCKET}
# wikipedia text embedding sample
PARAMETER_VALUES = {
"embedding_dim": 100, # <---CHANGE THIS (OPTIONAL)
"input_base": "{}/wikipedia".format(SOURCE_DATA_PATH),
"input_type": "text", # For wikipedia sample
"max_vocab_size": 409600, # <---CHANGE THIS (OPTIONAL)
"num_epochs": 20, # <---CHANGE THIS (OPTIONAL)
}
! gsutil -m cp -r gs://cloud-samples-data/vertex-ai/matching-engine/swivel/models/wikipedia/model {SOURCE_DATA_PATH}/wikipedia_model
SAVEDMODEL_DIR = os.path.join(SOURCE_DATA_PATH, "wikipedia_model/model")
embedding_model = tf.saved_model.load(SAVEDMODEL_DIR)
input_items = ["horror", "film", '"HORROR! Film"', "horror-film"]
output_embeddings = embedding_model(input_items)
horror_film_embedding = tf.math.reduce_mean(output_embeddings[:2], axis=0)
# Average of embeddings for 'horror' and 'film' equals that for '"HORROR! Film"'
# since preprocessing cleans punctuation and lowercases.
assert tf.math.reduce_all(tf.equal(horror_film_embedding, output_embeddings[2])).numpy()
# Embedding for '"HORROR! Film"' equal that for 'horror-film' since the
# latter contains a hyphenation and thus is a separate token.
assert not tf.math.reduce_all(
tf.equal(output_embeddings[2], output_embeddings[3])
).numpy()
# Change input_items with your own item tokens
input_items = ["apple", "orange", "hammer", "nails"]
output_embeddings = embedding_model(input_items)
for idx1 in range(len(input_items)):
item1 = input_items[idx1]
embedding1 = output_embeddings[idx1].numpy()
for idx2 in range(idx1 + 1, len(input_items)):
item2 = input_items[idx2]
embedding2 = output_embeddings[idx2].numpy()
similarity = round(cosine_similarity([embedding1], [embedding2])[0][0], 5)
print(f"Similarity between '{item1}' and '{item2}' = {similarity}")
# Delete endpoint resource
# If force is set to True, all deployed models on this Endpoint will be undeployed first.
endpoint.delete(force=True)
# Delete model resource
MODEL_RESOURCE_NAME = model.resource_name
! gcloud ai models delete $MODEL_RESOURCE_NAME --region $REGION --quiet
# Delete Cloud Storage objects that were created
! gsutil -m rm -r $SOURCE_DATA_PATH
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Restart the kernel
Step2: Before you begin
Step3: Otherwise, set your project ID here.
Step4: Timestamp
Step5: Authenticate your Google Cloud account
Step6: Create a Cloud Storage bucket
Step7: Only if your bucket doesn't already exist
Step8: Finally, validate access to your Cloud Storage bucket by examining its contents
Step9: Service Account
Step10: Set service account access for Vertex AI Pipelines
Step11: Import libraries and define constants
Step12: Import packages used in this tutorial.
Step13: Copy and configure the Swivel template
Step14: Change your pipeline configurations
Step15: Both swivel_pipeline_basic.json and swivel_pipeline.json are generated.
Step16: Submit the pipeline to Vertex AI
Step17: After the job is submitted successfully, you can view its details (including run name that you'll need below) and logs.
Step18: When the training starts, you can view the logs in TensorBoard
Step19: For Google Cloud Notebooks, you can do the following
Step20: Load the movie ids and titles for querying embeddings
Step21: Look up embedding by making an online prediction request
Step22: Explore movie embedding similarities
Step23: Create the Swivel job for Wikipedia text embedding (Optional)
Step24: Submit the pipeline job through the aiplatform.PipelineJob object.
Step25: Explore the trained text embeddings
Step26: You can use the TensorBoard Embedding Projector to graphically represent high dimensional embeddings, which can be helpful in examining and understanding your embeddings.
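The export step for the Embedding Projector is not part of this notebook, but a rough sketch could look like the following. It assumes the embedding_model loaded above; the token list and the file names vecs.tsv and meta.tsv are just illustrative choices in the format that the projector at projector.tensorflow.org accepts.
import csv
items = ["apple", "orange", "hammer", "nails"]  # replace with your own tokens
vectors = embedding_model(items).numpy()
with open("vecs.tsv", "w", newline="") as vec_file, open("meta.tsv", "w", newline="") as meta_file:
    vec_writer = csv.writer(vec_file, delimiter="\t")
    meta_writer = csv.writer(meta_file, delimiter="\t")
    for item, vector in zip(items, vectors):
        vec_writer.writerow(vector.tolist())   # one embedding vector per row
        meta_writer.writerow([item])           # matching label per row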
|
7,223
|
<ASSISTANT_TASK:>
Python Code:
separate_sf(b.gsw.cat['logMstar'],b.gsw.cat['logSFR'],m1=9.,m2=11,ms_slope=0.592,ms_intercept=-6.05,dm=.2)
logmass = 10.8
yms = -.1968999*logmass**2+4.4186588*logmass-24.607396
print("offset b/w MS and sSFR=-11.5 = {:.3f} ".format(yms- (logmass-11.5)))
print("offset relative to MS sigma = {:.3f}".format((yms- (logmass-11.5))/.3))
flag = b.gsw.cat['__B_T_r'] < 0.3
gswBTcut = b.gsw.cat[flag]
separate_sf(gswBTcut['logMstar'],gswBTcut['logSFR'],m1=9.,m2=11,ms_slope=0.754,ms_intercept=-7.56,dm=.3)
gswlcs = Table.read(homedir+'/research/GSWLC/GSWLC-X2-LCS-Zoverlap.dat')
separate_sf(gswlcs['logMstar'],gswlcs['logSFR'],m1=9.,m2=11,ms_slope=.553,ms_intercept=-5.71,dm=.2)
logmass = 10.7
yms = -.121886*logmass**2+2.912528*logmass-17.1120
print("offset b/w MS and sSFR=-11.5 = {:.3f} ".format(yms- (logmass-11.5)))
print("offset relative to MS sigma = {:.3f}".format((yms- (logmass-11.5))/.3))
10.**.827
.827-.45
lcsflag = (b.lcs.membflag | b.lcs.infallflag) & (b.lcs.cat['logMstar'] > 9.7) & (b.lcs.cat['logSFR']-b.lcs.cat['logMstar'] > -11.5)
logmstar = b.lcs.cat['logMstar'][lcsflag]
from scipy.stats import scoreatpercentile
print("percentile mass of LCS sample = {:.3f}".format(scoreatpercentile(logmstar,90)))
flag = (b.gsw.cat['logMstar'] > 9.7) & (b.gsw.cat['logSFR']-b.gsw.cat['logMstar'] > -11.5)
logmstar = b.gsw.cat['logMstar'][flag]
from scipy.stats import scoreatpercentile
print("percentile mass of field sample = {:.3f}".format(scoreatpercentile(logmstar,90)))
.834-.45
gsw = Table.read(homedir+'/research/GSWLC/GSWLC-X2.dat',format='ascii')
separate_sf(gsw['logMstar'],gsw['logSFR'],m1=9,m2=11,ms_slope=.799,ms_intercept=-7.93,dm=.2)
mycolors = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.figure()
plt.hexbin(gswlcs['logMstar'],gswlcs['logSFR'],cmap='gray_r',gridsize=75,bins='log')
xline = np.linspace(8,11.5,100)
# from GSWLC with LCS z cut
yline = .556*xline-5.74
STD = 0.32
offset = -0.62
plt.plot(xline,yline,lw=2,c=mycolors[0],label='GSWLC, Zcut only')
plt.plot(xline,yline-1.5*STD,ls='--',lw=2,c=mycolors[0])
plt.plot(xline,yline+offset,ls=':',lw=2,c=mycolors[0])
plt.fill_between(xline,yline-1.5*STD,yline+offset,color=mycolors[0],alpha=.5)
# from GSWLC, cut according to AGC, Simard, etc
STD = 0.29
offset = -0.7
yline = 0.592*xline -6.18
plt.plot(xline,yline,lw=2,c=mycolors[1],label='GSWLC, all cuts')
plt.plot(xline,yline-1.5*STD,ls='--',lw=2,c=mycolors[1])
plt.plot(xline,yline+offset,ls=':',lw=2,c=mycolors[1])
plt.fill_between(xline,yline-1.5*STD,yline+offset,color=mycolors[1],alpha=.5)
plt.legend()
# from full GSWLC
STD = 0.3
offset = -0.82
yline = 0.799*xline -7.92
#plt.plot(xline,yline,lw=2,c=mycolors[2],label='GSWLC')
#plt.plot(xline,yline-1.5*STD,ls='--',lw=2,c=mycolors[2])
#plt.plot(xline,yline+offset,ls=':',lw=2,c=mycolors[2])
#plt.fill_between(xline,yline-1.5*STD,yline+offset,color=mycolors[2],alpha=.5)
plt.legend()
plt.axis([8,12,-4,2])
plt.xlabel('logMstar')
plt.ylabel('logSFR')
mycolors = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.figure()
plt.hexbin(gsw['logMstar'],gsw['logSFR'],cmap='gray_r',gridsize=75,bins='log',extent=(8,12,-4,2))
xline = np.linspace(8,11.5,100)
# from GSWLC with LCS z cut
yline = .556*xline-5.74
STD = 0.32
offset = -0.62
#plt.plot(xline,yline,lw=2,c=mycolors[0],label='GSWLC, Zcut only')
#plt.plot(xline,yline-1.5*STD,ls='--',lw=2,c=mycolors[0])
#plt.plot(xline,yline+offset,ls=':',lw=2,c=mycolors[0])
#plt.fill_between(xline,yline-1.5*STD,yline+offset,color=mycolors[0],alpha=.5)
# from GSWLC, cut according to AGC, Simard, etc
STD = 0.29
offset = -0.7
yline = 0.592*xline -6.18
#plt.plot(xline,yline,lw=2,c=mycolors[1],label='GSWLC, all cuts')
#plt.plot(xline,yline-1.5*STD,ls='--',lw=2,c=mycolors[1])
#plt.plot(xline,yline+offset,ls=':',lw=2,c=mycolors[1])
#plt.fill_between(xline,yline-1.5*STD,yline+offset,color=mycolors[1],alpha=.5)
plt.legend()
# from full GSWLC
STD = 0.3
offset = -0.82
yline = 0.799*xline -7.92
plt.plot(xline,yline,lw=2,c=mycolors[2],label='GSWLC')
plt.plot(xline,yline-1.5*STD,ls='--',lw=2,c=mycolors[2])
plt.plot(xline,yline+offset,ls=':',lw=2,c=mycolors[2])
#plt.fill_between(xline,yline-1.5*STD,yline+offset,color=mycolors[2],alpha=.5)
plt.legend()
plt.axis([8,12,-4,2])
separate_sf(b.lcs.cat['logMstar'],b.lcs.cat['logSFR'],m1=9.,m2=11,ms_slope=0.592,ms_intercept=-6.05,dm=.5)
flag = b.lcs.cat['__B_T_r'] < 0.3
lcsBTcut = b.lcs.cat[flag]
separate_sf(lcsBTcut['logMstar'],lcsBTcut['logSFR'],m1=9.,m2=11,ms_slope=0.754,ms_intercept=-7.56,dm=.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using sample from the paper, with B/T cut
Step2: Using GSWLC, cut in redshift only
Step3: How to pick the mass at which we define the offset relative to the MS; we could use a percentile of the LCS mass distribution (see the sketch after this list)
Step4: Using full GSWLC
Step5: Summary of fits
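As a compact restatement of the main-sequence offset arithmetic used in the cells above (the quadratic coefficients and the 0.3 dex scatter are copied from those cells; the wrapper name ms_offset is only illustrative and is not part of the source code):
def ms_offset(logmass, coeffs=(-0.1968999, 4.4186588, -24.607396), sigma=0.3):
    # SFR of the main sequence at this stellar mass (second-order fit)
    yms = coeffs[0] * logmass**2 + coeffs[1] * logmass + coeffs[2]
    # offset between the MS and the sSFR = -11.5 line, in dex and in units of sigma
    offset = yms - (logmass - 11.5)
    return offset, offset / sigma
print(ms_offset(10.8))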
|
7,224
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
example = pd.DataFrame(data=np.array([['George', 'Male', '14','Novice', 'Pristina'],
['Mary', 'Female', '14', 'Intermediate', 'Gjilan'],
['Jimmy','Male', '15', 'Novice', 'Kamenica'],
['Judy', 'Female', '13', 'Advanced', 'Peja']],dtype=str),
columns=['name', 'gender', 'age', 'rating', 'municipality'])
print example
filepath = '../LPI Project/Data Sheets/Master LPI sheet.csv'
import pandas as pd
import numpy as np
try:
lpidf = pd.read_csv(filepath, index_col=0)
print "All good... hopefully"
except IOError:
print "double check file path \n\nand no you did not load the data correctly"
lpidf.info()
print "***Summary stats*** \n \n", lpidf['Age'].describe(), "\n"
print "Value Counts\nage, ratio of sample \n \n",
print lpidf['Age'].value_counts(sort=False,normalize=True, ascending=False)
%matplotlib inline
import seaborn as sns
import ggplot
from ggplot import aes
import matplotlib as mpl
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
sns.set(color_codes=True)
sns.set_style("ticks")
sns.set_context("talk")
sns.distplot(lpidf['Age'], kde=True, rug=True, bins=[9,10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ,20])
plt.title("Student Ages")
sns.despine(offset=10, trim=False)
sns.set(style="ticks")
sns.set_context("talk")
sns.violinplot(x="Gender", y="Age", data=lpidf, palette="PRGn")
sns.despine(offset=10, trim=True)
plt.title('Distribution of age by gender')
print "***Ratio of Male and Female*** \n \n" , lpidf['Gender'].value_counts(normalize=True)
sns.set_context("talk")
g = sns.factorplot('municipality', hue='Gender',aspect=4, kind="count", data=lpidf, palette="Set3")
g.set_xticklabels(rotation=45)
plt.title('Number of male and female students surveyed in each municipality')
print 'Example \n\n', example
print '\n\n', pd.crosstab(example['age'], example['rating'], margins=True)
print "***crosstab of LPI rating and age*** \n\n",
print pd.crosstab(lpidf['Age'], lpidf['Rating'], margins=True)
print "\n\n ***crosstab of LPI rating and gender***\n\n",
print pd.crosstab(lpidf['Gender'], lpidf['Rating'], margins=True)
print "\n\n ***crosstab of LPI rating and internet acces***\n\n"
print pd.crosstab(lpidf['Internet Access'], lpidf['Rating'], margins=True)
print "\n\n ***crosstab of LPI rating and Private courses***\n\n"
print pd.crosstab(lpidf['Taking Private Courses'], lpidf['Rating'], margins=True)
print "\n\n***crosstab of LPI rating and school type*** \n\n"
print pd.crosstab(lpidf['school_type'], lpidf['Rating'], margins=True)
print "\n\n***crosstab of LPI rating and English speaking family member*** \n\n",
print pd.crosstab(lpidf['English Speaking family member'], lpidf['Rating'], margins=True)
columns = "Grade"
rows = "Rating"
print pd.crosstab(lpidf[rows], lpidf[columns], margins=True)
sns.set_context("talk")
sns.countplot(x='Rating', hue='Gender',
order=['N/a', 'Novice', 'Intermediate', 'Advanced'], data=lpidf, palette="Set3")
sns.despine(offset=2, trim=False)
plt.title('LPI Rating by Gender')
sns.countplot(x='Rating', hue='Internet Access',
order=['N/a', 'Novice', 'Intermediate', 'Advanced'], data=lpidf, palette="Set2")
sns.despine(offset=2, trim=False)
plt.title('LPI Rating by Internet Access')
sns.countplot(x='Rating', hue='Taking Private Courses',
order=['N/a', 'Novice', 'Intermediate', 'Advanced'], data=lpidf, palette="Set3")
sns.despine(offset=2, trim=False)
plt.title('LPI Rating and Enrollment in Private Engligh courses')
sns.countplot(x='Rating', hue='school_type',
order=['N/a', 'Novice', 'Intermediate', 'Advanced'], data=lpidf, palette="GnBu_d")
sns.despine(offset=2, trim=False)
plt.title('LPI Rating and Type of School attended')
sns.countplot(x='Rating', hue='English Speaking family member',
order=['N/a', 'Novice', 'Intermediate', 'Advanced'], data=lpidf, palette="BrBG")
sns.despine(offset=2, trim=False)
plt.title('LPI Rating and English Speaking Family Members')
from numpy import median
sns.pointplot(x='Rating', y='Age', dodge=True, hue='Gender',
join=False, data=lpidf, pallette='BrBG',
order=['N/a', 'Novice', 'Intermediate', 'Advanced'])
sns.despine(offset=2, trim=True)
plt.title('LPI by age and Gender')
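# Recode yes/no style survey answers as 1/0 so they can be used as numeric
# predictors in the models described in the accompanying text.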
lpidf = lpidf.replace(to_replace = 'yes', value=1)
lpidf = lpidf.replace(to_replace = 'no', value= 0)
lpidf = lpidf.replace(to_replace = 'Yes', value=1)
lpidf = lpidf.replace(to_replace = 'No', value= 0)
lpidf.info()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Alright, so let's load up our actual data. First input the filepath for the formatted and complete CSV file after filepath = ' '
Step2: Let's take a look at our data to make sure that everything is what we expect.
Step3: OK, let's look at some summary statistics of our data; run the next line of code to get summary statistics on the ages of the students in our sample
Step4: Would you kindly run the next cell to get a visualization of our students' ages
Step5: The next cell will produce violin plots (a variation of the boxplot) broken down by gender
Step6: Gender
Step7: Let's do a histogram to represent this information and break it down by municipality
Step8: LPI Rating
Step9: Helpful? Maybe, maybe not. Typically our brains have an easier time processing numerical information visually, as in the graphs above, but sometimes simple tables like these can show us some interesting points. Also, please note that certain columns may not add up to the total number of observations in the dataset; this is simply because in many cases we have missing data for certain observations and the program just ignores those data points.
Step10: OK, let's move on to visualizing the LPI ratings; would you kindly run the next cell
Step11: OK, so now let's go ahead and start digging into what factors could be influencing student scores on the LPI
Step12: These next few graphs will show a series of linear models between LPI score and the various factors we've collected, though in order to make the mathematical bits and pieces work, we have to convert our LPI ratings and yes/no questions to numerical rankings. See the explanations below.
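The code above only recodes the yes/no answers; as a minimal, hypothetical sketch of how the LPI ratings themselves could be turned into ordinal numbers, something like the following would work (the mapping values and the column name rating_num are illustrative choices, not taken from the source):
rating_order = {'N/a': 0, 'Novice': 1, 'Intermediate': 2, 'Advanced': 3}
lpidf['rating_num'] = lpidf['Rating'].map(rating_order)
lpidf[['Rating', 'rating_num']].head()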
|
7,225
|
<ASSISTANT_TASK:>
Python Code:
print("Hello World!")
str01 = "Hello World!"
str02 = "22"
str03 = "This is so c00l!"
print(str01, str02, str03)
print(type(str02))
children = 5
type(children)
new_children = float(children)
type(new_children)
stringy_kids = str(children)
type(stringy_kids)
text01 = "The name's Bond, James Bond."
print(text01)
text02 = """The name's Bond, James Bond."""
text03 = """Text here"""
text04 = """Here we go!"""
print(text02)
print(text03)
print(text04)
text05 = 'The name's Bond, James Bond.'
print(text05)
text05 = 'The name\'s Bond, James Bond.'
print(text05)
text06 = 'The name\'s Bond, James Bond.\nYes really!'
print(text06)
text07 = 'The name\'s Bond, Tabbed Bond.\tYes really!'
print(text07)
text08 = "The name\'s Bond, Tabbed Bond. Yes really!"
print(text08)
print("Please Access C:\home\test instead of C:\\games\n\hello")
print(r"Please Access C:\home\test instead of C:\\games\n\hello")
age = 42
print("I am {} years old.".format(age))
print("I am {} years old".format(age))
print("Hello World!")
fighter1 = input()
fighter2 = input()
type(fighter1)
print("We went to see a bout between {} and {}. {} totally kicked ass!".format(fighter1,fighter2, fighter1))
show = 'Monty Python'
show[0]
show[-1]
show[0:4]
show[0:5]
show[-7:]
show[-3:]
show[1:]
# Enter your code below:
sherlocked = "To Sherlock Holmes she is always the woman."
print(sherlocked)
print(len(sherlocked))
print(sherlocked.upper())
print(sherlocked.lower())
sherlocked.find("she")
pronoun = sherlocked.find("she")
print(pronoun)
# Where can we find the first occurance of 'she'?
print("The world 'she' first appears at index", pronoun)
watson = "In the year 1878 I took my degree of Doctor of Medicine of the University of London, and proceeded to Netley to go through the course prescribed for surgeons in the army. Having completed my studies there, I was duly attached to the Fifth Northumberland Fusiliers as Assistant Surgeon. The regiment was stationed in India at the time, and before I could join it, the second Afghan war had broken out. On landing at Bombay, I learned that my corps had advanced through the passes, and was already deep in the enemy’s country. I followed, however, with many other officers who were in the same situation as myself, and succeeded in reaching Candahar in safety, where I found my regiment, and at once entered upon my new duties."
print(watson)
first_appearance = watson.find("my")
first_appearance
second_appearance = watson.find("my", first_appearance + 1)
second_appearance
watson.count("on")
print("A Study in Scarlet".split(" "))
watson.count("me")
watson.startswith("Sherlock")
watson.endswith("duties.")
watson.split("and")
watson2 = " Hello "
watson2.strip()
watson3 = watson.replace(",","")
watson3 = watson3.replace(".", "")
watson3
watson3.split(" ")
ice_cream = "chocolate vanilla banana caramel"
ice_cream = ice_cream.split(" ")
print(ice_cream)
ice_cream[0]
ice_cream[-1]
ice_cream[1]
len(ice_cream)
# gavin@hooley.com
passage = """During the first week or so we had no callers, and I had begun to think that my companion was as friendless a man as I was myself. Presently, however, I found that he had many acquaintances, and those in the most different classes of society. There was one little sallow rat-faced, dark-eyed fellow who was introduced to me as Mr. Lestrade, and who came three or four times in a single week. One morning a young girl called, fashionably dressed, and stayed for half an hour or more. The same afternoon brought a grey-headed, seedy visitor, looking like a Jew pedlar, who appeared to me to be much excited, and who was closely followed by a slip-shod elderly woman. On another occasion an old white-haired gentleman had an interview with my companion; and on another a railway porter in his velveteen uniform. When any of these nondescript individuals put in an appearance, Sherlock Holmes used to beg for the use of the sitting-room, and I would retire to my bed-room. He always apologized to me for putting me to this inconvenience. “I have to use this room as a place of business,” he said, “and these people are my clients.” Again I had an opportunity of asking him a point blank question, and again my delicacy prevented me from forcing another man to confide in me. I imagined at the time that he had some strong reason for not alluding to it, but he soon dispelled the idea by coming round to the subject of his own accord.
It was upon the 4th of March, as I have good reason to remember, that I rose somewhat earlier than usual, and found that Sherlock Holmes had not yet finished his breakfast. The landlady had become so accustomed to my late habits that my place had not been laid nor my coffee prepared. With the unreasonable petulance of mankind I rang the bell and gave a curt intimation that I was ready. Then I picked up a magazine from the table and attempted to while away the time with it, while my companion munched silently at his toast. One of the articles had a pencil mark at the heading, and I naturally began to run my eye through it.
Its somewhat ambitious title was “The Book of Life,” and it attempted to show how much an observant man might learn by an accurate and systematic examination of all that came in his way. It struck me as being a remarkable mixture of shrewdness and of absurdity. The reasoning was close and intense, but the deductions appeared to me to be far-fetched and exaggerated. The writer claimed by a momentary expression, a twitch of a muscle or a glance of an eye, to fathom a man’s inmost thoughts. Deceit, according to him, was an impossibility in the case of one trained to observation and analysis. His conclusions were as infallible as so many propositions of Euclid. So startling would his results appear to the uninitiated that until they learned the processes by which he had arrived at them they might well consider him as a necromancer.
“From a drop of water,” said the writer, “a logician could infer the possibility of an Atlantic or a Niagara without having seen or heard of one or the other. So all life is a great chain, the nature of which is known whenever we are shown a single link of it. Like all other arts, the Science of Deduction and Analysis is one which can only be acquired by long and patient study nor is life long enough to allow any mortal to attain the highest possible perfection in it. Before turning to those moral and mental aspects of the matter which present the greatest difficulties, let the enquirer begin by mastering more elementary problems. Let him, on meeting a fellow-mortal, learn at a glance to distinguish the history of the man, and the trade or profession to which he belongs. Puerile as such an exercise may seem, it sharpens the faculties of observation, and teaches one where to look and what to look for. By a man’s finger nails, by his coat-sleeve, by his boot, by his trouser knees, by the callosities of his forefinger and thumb, by his expression, by his shirt cuffs—by each of these things a man’s calling is plainly revealed. That all united should fail to enlighten the competent enquirer in any case is almost inconceivable.”
“What ineffable twaddle!” I cried, slapping the magazine down on the table, “I never read such rubbish in my life.”
“What is it?” asked Sherlock Holmes.
“Why, this article,” I said, pointing at it with my egg spoon as I sat down to my breakfast. “I see that you have read it since you have marked it. I don’t deny that it is smartly written. It irritates me though. It is evidently the theory of some arm-chair lounger who evolves all these neat little paradoxes in the seclusion of his own study. It is not practical. I should like to see him clapped down in a third class carriage on the Underground, and asked to give the trades of all his fellow-travellers. I would lay a thousand to one against him.”
“You would lose your money,” Sherlock Holmes remarked calmly. “As for the article I wrote it myself.”
“You!”
“Yes, I have a turn both for observation and for deduction. The theories which I have expressed there, and which appear to you to be so chimerical are really extremely practical—so practical that I depend upon them for my bread and cheese.”
“And how?” I asked involuntarily.
“Well, I have a trade of my own. I suppose I am the only one in the world. I’m a consulting detective, if you can understand what that is. Here in London we have lots of Government detectives and lots of private ones. When these fellows are at fault they come to me, and I manage to put them on the right scent. They lay all the evidence before me, and I am generally able, by the help of my knowledge of the history of crime, to set them straight. There is a strong family resemblance about misdeeds, and if you have all the details of a thousand at your finger ends, it is odd if you can’t unravel the thousand and first. Lestrade is a well-known detective. He got himself into a fog recently over a forgery case, and that was what brought him here.”
“And these other people?”
“They are mostly sent on by private inquiry agencies. They are all people who are in trouble about something, and want a little enlightening. I listen to their story, they listen to my comments, and then I pocket my fee.”
“But do you mean to say,” I said, “that without leaving your room you can unravel some knot which other men can make nothing of, although they have seen every detail for themselves?”
“Quite so. I have a kind of intuition that way. Now and again a case turns up which is a little more complex. Then I have to bustle about and see things with my own eyes. You see I have a lot of special knowledge which I apply to the problem, and which facilitates matters wonderfully. Those rules of deduction laid down in that article which aroused your scorn, are invaluable to me in practical work. Observation with me is second nature. You appeared to be surprised when I told you, on our first meeting, that you had come from Afghanistan.”
“You were told, no doubt.”
“Nothing of the sort. I knew you came from Afghanistan. From long habit the train of thoughts ran so swiftly through my mind, that I arrived at the conclusion without being conscious of intermediate steps. There were such steps, however. The train of reasoning ran, ‘Here is a gentleman of a medical type, but with the air of a military man. Clearly an army doctor, then. He has just come from the tropics, for his face is dark, and that is not the natural tint of his skin, for his wrists are fair. He has undergone hardship and sickness, as his haggard face says clearly. His left arm has been injured. He holds it in a stiff and unnatural manner. Where in the tropics could an English army doctor have seen much hardship and got his arm wounded? Clearly in Afghanistan.’ The whole train of thought did not occupy a second. I then remarked that you came from Afghanistan, and you were astonished.”
“It is simple enough as you explain it,” I said, smiling. “You remind me of Edgar Allen Poe’s Dupin. I had no idea that such individuals did exist outside of stories.”
Sherlock Holmes rose and lit his pipe. “No doubt you think that you are complimenting me in comparing me to Dupin,” he observed. “Now, in my opinion, Dupin was a very inferior fellow. That trick of his of breaking in on his friends’ thoughts with an apropos remark after a quarter of an hour’s silence is really very showy and superficial. He had some analytical genius, no doubt; but he was by no means such a phenomenon as Poe appeared to imagine.”
“Have you read Gaboriau’s works?” I asked. “Does Lecoq come up to your idea of a detective?”
Sherlock Holmes sniffed sardonically. “Lecoq was a miserable bungler,” he said, in an angry voice; “he had only one thing to recommend him, and that was his energy. That book made me positively ill. The question was how to identify an unknown prisoner. I could have done it in twenty-four hours. Lecoq took six months or so. It might be made a text-book for detectives to teach them what to avoid.”
I felt rather indignant at having two characters whom I had admired treated in this cavalier style. I walked over to the window, and stood looking out into the busy street. “This fellow may be very clever,” I said to myself, “but he is certainly very conceited.”
“There are no crimes and no criminals in these days,” he said, querulously. “What is the use of having brains in our profession. I know well that I have it in me to make my name famous. No man lives or has ever lived who has brought the same amount of study and of natural talent to the detection of crime which I have done. And what is the result? There is no crime to detect, or, at most, some bungling villainy with a motive so transparent that even a Scotland Yard official can see through it.”
I was still annoyed at his bumptious style of conversation. I thought it best to change the topic.
“I wonder what that fellow is looking for?” I asked, pointing to a stalwart, plainly-dressed individual who was walking slowly down the other side of the street, looking anxiously at the numbers. He had a large blue envelope in his hand, and was evidently the bearer of a message.
“You mean the retired sergeant of Marines,” said Sherlock Holmes.
“Brag and bounce!” thought I to myself. “He knows that I cannot verify his guess.”
The thought had hardly passed through my mind when the man whom we were watching caught sight of the number on our door, and ran rapidly across the roadway. We heard a loud knock, a deep voice below, and heavy steps ascending the stair.
“For Mr. Sherlock Holmes,” he said, stepping into the room and handing my friend the letter.
Here was an opportunity of taking the conceit out of him. He little thought of this when he made that random shot. “May I ask, my lad,” I said, in the blandest voice, “what your trade may be?”
“Commissionaire, sir,” he said, gruffly. “Uniform away for repairs.”
“And you were?” I asked, with a slightly malicious glance at my companion.
“A sergeant, sir, Royal Marine Light Infantry, sir. No answer? Right, sir.”
He clicked his heels together, raised his hand in a salute, and was gone."""
# Count Sherlock
# Count Lestrade
mail1 = "gavin@hooley.com".split("@")
mail1
mail1[0]
Username = mail1[0]
Domain = mail1[1]
print("Username:",Username, "\nDomain:",Domain)
# Solution for Sherlock and Lestrade
print("Number of times 'Sherlock' appears in passage:", passage.count("Sherlock"))
print("Number of times 'Lestrade' appears in passage:", passage.count("Lestrade"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: "Hello World!" was the string. Any sequence of characters, including digits, is a string as long as it is enclosed in single, double, or triple quotation marks.
Step2: Now remember how we converted an int to a float, and vice versa?
Step3: You can convert them to strings too!
Step7: Switching between Single, Double and Triple Quotes
Step8: Say hello to escape characters!
Step9: "Raw" Strings
Step10: String Substitution
Step11: Did you see above how we used .format to substitute values into a string? This is very useful when building more complex strings. Let's see another example (a short standalone sketch also follows at the end of this step list).
Step12: Indexing Strings
Step13: Exercise
Step14: String Operations
Step15: Like most things in Python, we can assign that to a variable
Step16: Exercise
Step17: More Splits
Step18: Exercise
Step20: Exercise
Step21: Solution for Email Splitting Problem
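A minimal standalone sketch of the operations these steps describe (escape characters, raw strings, .format substitution, and splitting an email address); the variable names below are illustrative and not taken from the notebook.
email = "gavin@hooley.com"
username, domain = email.split("@")                         # split on the "@" separator
print("Username: {}\nDomain: {}".format(username, domain))  # .format substitution plus an escaped newline
print(r"C:\new_folder")                                     # a raw string leaves the backslash untouched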
|
7,226
|
<ASSISTANT_TASK:>
Python Code:
from ROP import *
#Takes a little bit, wait a while.
#ROP Number syntax: ###
#Eye Drop syntax: HH MM HH MM HH MM
#Exam Syntax: HH MM HH MM
print 'Baseline Averages\n', 'NIRS :\t', avg0NIRS, '\nPI :\t',avg0PI, '\nSpO2 :\t',avg0O2,'\nPR :\t',avg0PR,
print resultdrops1
print resultdrops2
print resultdrops3
print result1
print result2
print result3
print result4
print result5
print "Desat Counts for X mins\n"
print "Pre Mild Desat (85-89) Count: %s\t" %above, "for %s min" %((a_len*2)/60.)
print "Pre Mod Desat (81-84) Count: %s\t" %middle, "for %s min" %((m_len*2)/60.)
print "Pre Sev Desat (=< 80) Count: %s\t" %below, "for %s min\n" %((b_len*2)/60.)
print "Post Mild Desat (85-89) Count: %s\t" %above2, "for %s min" %((a_len2*2)/60.)
print "Post Mod Desat (81-84) Count: %s\t" %middle2, "for %s min" %((m_len2*2)/60.)
print "Post Sev Desat (=< 80) Count: %s\t" %below2, "for %s min\n" %((b_len2*2)/60.)
print "Data Recording Time!"
print '*' * 10
print "Pre-Exam Data Recording Length\t", X - Y # start of exam - first data point
print "Post-Exam Data Recording Length\t", Q - Z #last data point - end of exam
print "Total Data Recording Length\t", Q - Y #last data point - first data point
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Baseline Average Calculation
Step2: First Eye Drop Avg Every 10 Sec For 5 Minutes
Step3: Second Eye Drop Avg Every 10 Sec For 5 Minutes
Step4: Third Eye Drop Avg Every 10 Sec For 5 Minutes
Step5: Average Every 10 Sec During ROP Exam for first 4 minutes
Step6: Average Every 5 Mins Hour 1-2 After ROP Exam
Step7: Average Every 15 Mins Hour 2-3 After ROP Exam
Step8: Average Every 30 Mins Hour 3-4 After ROP Exam
Step9: Average Every Hour 4-24 Hours Post ROP Exam
Step10: Mild, Moderate, and Severe Desaturation Events
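The averaging and desaturation counts described in these steps are computed inside the ROP module; the snippet below is only a hypothetical sketch of that kind of computation with pandas, and the file name and column names are assumptions.
import pandas as pd
vitals = pd.read_csv("vitals.csv", parse_dates=["time"], index_col="time")  # assumed layout of the raw recording
ten_sec_avg = vitals["SpO2"].resample("10S").mean()               # average every 10 seconds
mild = ((vitals["SpO2"] >= 85) & (vitals["SpO2"] <= 89)).sum()    # mild desaturation samples (85-89)
severe = (vitals["SpO2"] <= 80).sum()                             # severe desaturation samples (<= 80)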
|
7,227
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os, sys
sys.path.append(os.path.abspath('../../main/python'))
import numpy as np
import matplotlib.pyplot as plt
import thalesians.tsa.numpyutils as npu
import thalesians.tsa.processes as proc
import thalesians.tsa.randomness as rnd
import thalesians.tsa.simulation as sim
X = proc.ItoProcess(drift=lambda t, x: -x, diffusion=lambda t, x: .25)
rnd.random_state(np.random.RandomState(seed=42), force=True);
ts = []; xs = []
for t, x in sim.EulerMaruyama(process=X, times=sim.xtimes(0., 100.)):
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
X = proc.ItoProcess(drift=lambda t, x: -x, diffusion=.25)
def propagate(self, time0, value0, time, variate=None, state0=None, random_state=None):
raise NotImplementedError()
X = proc.OrnsteinUhlenbeckProcess(transition=1, vol=.25)
rnd.random_state(np.random.RandomState(seed=42), force=True);
ts = []; xs = []
for t, x in sim.EulerMaruyama(process=X, times=sim.xtimes(0., 100.)):
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
rnd.random_state(np.random.RandomState(seed=42), force=True)
em = sim.EulerMaruyama(process=X, times=sim.xtimes(0., 100.))
df = sim.run(em)
plt.plot(df);
isinstance(X, proc.SolvedItoProcess)
rnd.random_state(np.random.RandomState(seed=42), force=True);
x = 0.
ts = [0.]; xs = [x]
for t, v in zip(sim.xtimes(1., 100., 1.), rnd.multivariate_normals(ndim=1)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
def propagate_distr(self, time0, distr0, time):
pass
def propagate(self, time0, value0, time, variate=None, state0=None, random_state=None):
if self.noisedim != self.processdim:
raise NotImplementedError('Cannot utilize the propagate_distr of the Markov process in propagate if noisedim != processdim; provide a custom implementation')
if time == time0: return npu.tondim2(value0, ndim1tocol=True, copy=True)
value0 = npu.tondim2(value0, ndim1tocol=True, copy=False)
variate = npu.tondim2(variate, ndim1tocol=True, copy=False)
distr = self.propagate_distr(time, time0, distrs.NormalDistr.creatediracdelta(value0))
return distr.mean + np.dot(np.linalg.cholesky(distr.cov), variate)
X = proc.WienerProcess()
x0 = 0.
rnd.random_state(np.random.RandomState(seed=42), force=True)
em = sim.EulerMaruyama(process=X, initial_value=x0, times=sim.xtimes(start=0., stop=1., step=1E-3))
df = sim.run(em)
plt.plot(df);
rnd.random_state(np.random.RandomState(seed=42), force=True)
x = [x0]
ts = [0.]; xs = [x]
for t, v in zip(sim.xtimes(0., 1., 1E-3), rnd.multivariate_normals(ndim=1)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
X = proc.WienerProcess(mean=3., vol=4.)
x0 = 7.
rnd.random_state(np.random.RandomState(seed=42), force=True)
em = sim.EulerMaruyama(process=X, initial_value=x0, times=sim.xtimes(start=0., stop=5., step=1E-3))
df = sim.run(em)
plt.plot(df);
rnd.random_state(np.random.RandomState(seed=42), force=True)
x = [x0]
ts = [0.]; xs = [x]
for t, v in zip(sim.xtimes(0., 5., 1E-3), rnd.multivariate_normals(ndim=1)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
X = proc.WienerProcess.create_from_cov(mean=[3., 5.], cov=[[16., -8.], [-8., 16.]])
x0 = npu.col(7., 8.)
rnd.random_state(np.random.RandomState(seed=42), force=True)
em = sim.EulerMaruyama(process=X, initial_value=x0, times=sim.xtimes(start=0., stop=5., step=1E-3))
df = sim.run(em)
plt.plot(df);
rnd.random_state(np.random.RandomState(seed=42), force=True)
x = x0
ts = [0.]; xs = [x0.flatten()]
for t, v in zip(sim.xtimes(0., 5., 1E-3), rnd.multivariate_normals(ndim=2)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
X = proc.BrownianBridge()
rnd.random_state(np.random.RandomState(seed=42), force=True);
ts = []; xs = []
for t, x in sim.EulerMaruyama(process=X, initial_value=0., times=sim.xtimes(0., 1., .005)):
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
rnd.random_state(np.random.RandomState(seed=42), force=True);
x = [0.]
ts = [0.]; xs = [x]
for t, v in zip(sim.xtimes(0., 1., .005), rnd.multivariate_normals(ndim=1)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
X = proc.BrownianBridge(10., 15., 0., 10.)
rnd.random_state(np.random.RandomState(seed=42), force=True);
ts = []; xs = []
for t, x in sim.EulerMaruyama(process=X, initial_value=10., times=sim.xtimes(0., 10., .005)):
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
rnd.random_state(np.random.RandomState(seed=42), force=True);
x = [10.]
ts = [0.]; xs = [x]
for t, v in zip(sim.xtimes(0., 10., .005), rnd.multivariate_normals(ndim=1)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
x0 = npu.col(10., 7.)
cov = [[1., -2.], [-2., 9.]]
X = proc.BrownianBridge.create_from_cov(x0, npu.col(15., 3.), 0., 10., cov)
rnd.random_state(np.random.RandomState(seed=42), force=True);
ts = []; xs = []
for t, x in sim.EulerMaruyama(process=X, initial_value=x0, times=sim.xtimes(0., 10., .005)):
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
xs[-1]
rnd.random_state(np.random.RandomState(seed=42), force=True)
x = x0
ts = [0.]; xs = [x0.flatten()]
for t, v in zip(sim.xtimes(0., 10., .005), rnd.multivariate_normals(ndim=2)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
xs[-1]
start_time = 0.
end_time = 10.
times = np.linspace(0., 10., 1000)
start_value = 10.
end_value = 15.
times_col = npu.to_ndim_2(times, ndim_1_to_col=True)
mean = start_value + (times_col - start_time) / (end_time - start_time) * (end_value - start_value)
cov = np.array([[(end_time - max(times[i], times[j])) * (min(times[i], times[j]) - start_time) / (end_time - start_time) for j in range(len(times))] for i in range(len(times))])
values = rnd.multivariate_normal(mean, cov)
all_times = np.concatenate(([start_time], times, [end_time]))
all_values = np.concatenate(([start_value], values, [end_value]))
plt.plot(all_times, all_values);
X = proc.GeometricBrownianMotion()
x0 = .3
rnd.random_state(np.random.RandomState(seed=42), force=True)
em = sim.EulerMaruyama(process=X, initial_value=x0, times=sim.xtimes(start=0., stop=1., step=1E-3))
df = sim.run(em)
plt.plot(df);
rnd.random_state(np.random.RandomState(seed=42), force=True)
x = [x0]
ts = [0.]; xs = [x]
for t, v in zip(sim.xtimes(0., 1., 1E-3), rnd.multivariate_normals(ndim=1)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
X = proc.WienerProcess()
x0 = .3
rnd.random_state(np.random.RandomState(seed=42), force=True)
em = sim.EulerMaruyama(process=X, initial_value=x0, times=sim.xtimes(start=0., stop=1., step=1E-3))
df = sim.run(em)
plt.plot(df);
X = proc.GeometricBrownianMotion.create_from_pct_cov(pct_drift=[3., 5.], pct_cov=[[16., -8.], [-8., 16.]])
x0 = npu.col(7., 8.)
rnd.random_state(np.random.RandomState(seed=42), force=True)
em = sim.EulerMaruyama(process=X, initial_value=x0, times=sim.xtimes(start=0., stop=1., step=1E-3))
df = sim.run(em)
plt.plot(df);
rnd.random_state(np.random.RandomState(seed=42), force=True)
x = x0
ts = [0.]; xs = [x0.flatten()]
for t, v in zip(sim.xtimes(0., 1., 1E-3), rnd.multivariate_normals(ndim=2)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
X = proc.OrnsteinUhlenbeckProcess(transition=1., vol=1.)
x0 = 0.
rnd.random_state(np.random.RandomState(seed=42), force=True)
em = sim.EulerMaruyama(process=X, initial_value=x0, times=sim.xtimes(start=0., stop=5., step=.01))
df = sim.run(em)
plt.plot(df);
rnd.random_state(np.random.RandomState(seed=42), force=True)
x = [0.]
ts = [0.]; xs = [x]
for t, v in zip(sim.xtimes(0., 5., .01), rnd.multivariate_normals(ndim=1)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
X = proc.OrnsteinUhlenbeckProcess.create_from_cov(
transition=[[10., 0.], [0., 10.]],
mean=[3., 5.],
cov=[[9., -7.5], [-7.5, 25.]])
x0 = npu.col(7., 8.)
rnd.random_state(np.random.RandomState(seed=42), force=True)
em = sim.EulerMaruyama(process=X, initial_value=x0, times=sim.xtimes(start=0., stop=5., step=.01))
df = sim.run(em)
plt.plot(df);
rnd.random_state(np.random.RandomState(seed=42), force=True)
x = x0
ts = [0.]; xs = [x0.flatten()]
for t, v in zip(sim.xtimes(0., 5., .01), rnd.multivariate_normals(ndim=2)):
x = X.propagate(ts[-1], x, t, v)
ts.append(t); xs.append(x.flatten())
plt.plot(ts, xs);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ...and import some Python modules
Step2: Ito processes
Step3: It can then be approximated with a time-discrete stochastic approximation, such as the Euler-Maruyama strong Taylor scheme (a bare-bones version of this scheme is sketched after this step list).
Step4: Since in this particular case the diffusion coefficient is constant, we could have defined X as
Step5: Solved Ito processes
Step6: Given the time time0 and the process's value at that time, value0, and (if the process is stateful) the process's state, state0, at time0, as well as the random variate variate corresponding to the actual increment in the driving Brownian motion $W$, the propagate method will return the value of the process at time, time >= time0. If propagate is implemented, there is no need to resort to approximate schemes, such as the Euler-Maruyama scheme demonstrated above.
Step7: We make sure that we generate it with the same random seed...
Step8: ...and verify that the graph is unchanged when we apply EulerMaruyama to this process, now instantiated as an OrnsteinUhlenbeckProcess, rather than an ItoProcess
Step9: Instead of looping explicitly, we could have used the method run
Step10: Now, since
Step11: we don't need to apply Euler-Maruyama to produce a trajectory of this process and can use the propagate method instead
Step12: Markov processes
Step13: This method represents the transition kernel of the Markov process
Step14: Gaussian and Gauss-Markov processes
Step15: Univariate variance-scaled Wiener process with drift
Step16: Multivariate variance-scaled, correlated Wiener process with drift
Step17: Brownian Bridge
Step18: Generalized Brownian bridge
Step19: Multivariate Brownian bridge
Step20: A more efficient method for simulating a Brownian bridge
Step21: Geometric Brownian motion
Step22: Multivariate variance-scaled, correlated geometric Brownian motion with drift
Step23: Ornstein-Uhlenbeck process
Step24: Multivariate Ornstein-Uhlenbeck process
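For reference, here is a bare-bones, library-free sketch of the Euler-Maruyama scheme applied to dX_t = -X_t dt + 0.25 dW_t, the same process used above; the step size, horizon, and seed are illustrative assumptions.
import numpy as np
rng = np.random.default_rng(42)
dt, n_steps, x = 0.1, 1000, 0.0
path = [x]
for _ in range(n_steps):
    # drift * dt plus diffusion * dW, with dW ~ N(0, dt)
    x = x + (-x) * dt + 0.25 * np.sqrt(dt) * rng.standard_normal()
    path.append(x)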
|
7,228
|
<ASSISTANT_TASK:>
Python Code:
# Use the chown command to change the ownership of the repository.
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.3.0 || pip install tensorflow==2.3.0
# Install the required numpy version.
!pip install numpy==1.21.0
# Importing necessary modules/libraries such as numpy, pandas and datetime.
import datetime
import os
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import aiplatform
from matplotlib import pyplot as plt
from tensorflow import feature_column as fc
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Input, Dense, DenseFeatures, concatenate
from tensorflow.keras.models import Sequential
print(tf.__version__)
%matplotlib inline
# It sets the backend of matplotlib to the 'inline' backend. The output of plotting commands is displayed inline within frontends, directly
# below the code cell that produced it. The resulting plots will then also be stored in the notebook document.
%matplotlib inline
!ls -l ../data/*.csv
# Selecting specific CSV_COLUMNS, LABEL_COLUMN, DEFAULTS, UNWANTED_COLS.
CSV_COLUMNS = [
'fare_amount',
'pickup_datetime',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'passenger_count',
'key'
]
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
UNWANTED_COLS = ['pickup_datetime', 'key']
# Create an input function reading a file using the Dataset API
def features_and_labels(row_data):
label = row_data.pop(LABEL_COLUMN)
features = row_data
for unwanted_col in UNWANTED_COLS:
features.pop(unwanted_col)
return features, label
# Reading CSV files into a dataset.
def create_dataset(pattern, batch_size=1, mode='eval'):
dataset = tf.data.experimental.make_csv_dataset(
pattern, batch_size, CSV_COLUMNS, DEFAULTS)
dataset = dataset.map(features_and_labels)
if mode == 'train':
dataset = dataset.shuffle(buffer_size=1000).repeat()
# take advantage of multi-threading; 1=AUTOTUNE
dataset = dataset.prefetch(1)
return dataset
# TODO 1
# 1. Bucketize latitudes and longitudes
NBUCKETS = 16
latbuckets = np.linspace(start=38.0, stop=42.0, num=NBUCKETS).tolist()
lonbuckets = np.linspace(start=-76.0, stop=-72.0, num=NBUCKETS).tolist()
fc_bucketized_plat = fc.bucketized_column(
source_column=fc.numeric_column("pickup_longitude"), boundaries=lonbuckets)
fc_bucketized_plon = fc.bucketized_column(
source_column=fc.numeric_column("pickup_latitude"), boundaries=latbuckets)
fc_bucketized_dlat = fc.bucketized_column(
source_column=fc.numeric_column("dropoff_longitude"), boundaries=lonbuckets)
fc_bucketized_dlon = fc.bucketized_column(
source_column=fc.numeric_column("dropoff_latitude"), boundaries=latbuckets)
# 2. Cross features for locations
fc_crossed_dloc = fc.crossed_column(
[fc_bucketized_dlat, fc_bucketized_dlon],
hash_bucket_size=NBUCKETS * NBUCKETS)
fc_crossed_ploc = fc.crossed_column(
[fc_bucketized_plat, fc_bucketized_plon],
hash_bucket_size=NBUCKETS * NBUCKETS)
fc_crossed_pd_pair = fc.crossed_column(
[fc_crossed_dloc, fc_crossed_ploc],
hash_bucket_size=NBUCKETS**4)
# 3. Create embedding columns for the crossed columns
fc_pd_pair = fc.embedding_column(categorical_column=fc_crossed_pd_pair, dimension=3)
fc_dloc = fc.embedding_column(categorical_column=fc_crossed_dloc, dimension=3)
fc_ploc = fc.embedding_column(categorical_column=fc_crossed_ploc, dimension=3)
# TODO 2
wide_columns = [
# One-hot encoded feature crosses
fc.indicator_column(fc_crossed_dloc),
fc.indicator_column(fc_crossed_ploc),
fc.indicator_column(fc_crossed_pd_pair)
]
deep_columns = [
# Embedding_column to "group" together ...
fc.embedding_column(fc_crossed_pd_pair, dimension=10),
# Numeric columns
fc.numeric_column("pickup_latitude"),
fc.numeric_column("pickup_longitude"),
fc.numeric_column("dropoff_longitude"),
fc.numeric_column("dropoff_latitude")
]
INPUT_COLS = [
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'passenger_count'
]
inputs = {colname : Input(name=colname, shape=(), dtype='float32')
for colname in INPUT_COLS
}
# Here, tf.reduce_mean computes the mean of elements across dimensions of a tensor.
# tf.sqrt Computes element-wise square root of the input tensor.
# tf.square computes square of x element-wise.
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
# TODO 3
def build_model(dnn_hidden_units):
# Create the deep part of model
deep = DenseFeatures(deep_columns, name='deep_inputs')(inputs)
for num_nodes in dnn_hidden_units:
deep = Dense(num_nodes, activation='relu')(deep)
# Create the wide part of model
wide = DenseFeatures(wide_columns, name='wide_inputs')(inputs)
# Combine deep and wide parts of the model
combined = concatenate(inputs=[deep, wide], name='combined')
# Map the combined outputs into a single prediction value
output = Dense(units=1, activation=None, name='prediction')(combined)
# Finalize the model
model = Model(inputs=list(inputs.values()), outputs=output)
# Compile the keras model
model.compile(optimizer="adam", loss="mse", metrics=[rmse, "mse"])
return model
HIDDEN_UNITS = [10,10]
# Calling the build model
model = build_model(dnn_hidden_units=HIDDEN_UNITS)
# Converts a Keras plot_model to see a diagram of the model that we have created.
tf.keras.utils.plot_model(model, show_shapes=False, rankdir='LR')
BATCH_SIZE = 1000
NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset will repeat, wrap around
NUM_EVALS = 50 # how many times to evaluate
NUM_EVAL_EXAMPLES = 10000 # enough to get a reasonable sample
trainds = create_dataset(
pattern='../data/taxi-train*',
batch_size=BATCH_SIZE,
mode='train')
evalds = create_dataset(
pattern='../data/taxi-valid*',
batch_size=BATCH_SIZE,
mode='eval').take(NUM_EVAL_EXAMPLES//1000)
%%time
# Here, %%time prints the wall time for the entire cell
steps_per_epoch = NUM_TRAIN_EXAMPLES // (BATCH_SIZE * NUM_EVALS)
OUTDIR = "./taxi_trained"
shutil.rmtree(path=OUTDIR, ignore_errors=True) # start fresh each time
history = model.fit(x=trainds,
steps_per_epoch=steps_per_epoch,
epochs=NUM_EVALS,
validation_data=evalds,
callbacks=[TensorBoard(OUTDIR)])
RMSE_COLS = ['rmse', 'val_rmse']
# Pandas DataFrame is two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns).
pd.DataFrame(history.history)[RMSE_COLS].plot()
model.predict(
x={
"pickup_longitude": tf.convert_to_tensor([-73.982683]),
"pickup_latitude": tf.convert_to_tensor([40.742104]),
"dropoff_longitude": tf.convert_to_tensor([-73.983766]),
"dropoff_latitude": tf.convert_to_tensor([40.755174]),
"passenger_count": tf.convert_to_tensor([3.0]),
},
steps=1,
)
OUTPUT_DIR = "./export/savedmodel"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
TIMESTAMP = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
EXPORT_PATH = os.path.join(OUTPUT_DIR, TIMESTAMP)
tf.saved_model.save(model, EXPORT_PATH) # with default serving function
!saved_model_cli show \
--tag_set serve \
--signature_def serving_default \
--dir {EXPORT_PATH}
!find {EXPORT_PATH}
os.environ['EXPORT_PATH'] = EXPORT_PATH
PROJECT = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT = PROJECT[0]
BUCKET = PROJECT
REGION = "us-central1"
MODEL_DISPLAYNAME = f"taxifare_keras_functional-{TIMESTAMP}"
print(f"MODEL_DISPLAYNAME: {MODEL_DISPLAYNAME}")
# from https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-3:latest"
)
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
%%bash
# Create GCS bucket if it doesn't exist already...
exists=$(gsutil ls -d | grep -w gs://${BUCKET}/)
if [ -n "$exists" ]; then
echo -e "Bucket exists, let's not recreate it."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${BUCKET}
echo "\nHere are your current buckets:"
gsutil ls
fi
!gsutil cp -R $EXPORT_PATH gs://$BUCKET/$MODEL_DISPLAYNAME
uploaded_model = aiplatform.Model.upload(
display_name=MODEL_DISPLAYNAME,
artifact_uri=f"gs://{BUCKET}/{MODEL_DISPLAYNAME}",
serving_container_image_uri=SERVING_CONTAINER_IMAGE_URI,
)
MACHINE_TYPE = "n1-standard-2"
endpoint = uploaded_model.deploy(
machine_type=MACHINE_TYPE,
accelerator_type=None,
accelerator_count=None,
)
instance = {
"pickup_longitude": -73.982683,
"pickup_latitude": 40.742104,
"dropoff_longitude": -73.983766,
"dropoff_latitude": 40.755174,
"passenger_count": 3.0,
}
endpoint.predict([instance])
endpoint.undeploy_all()
endpoint.delete()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Kindly ignore the deprecation warnings and incompatibility errors.
Step2: Kindly ignore the deprecation warnings and incompatibility errors.
Step3: Load raw data
Step4: Use tf.data to read the CSV files
Step5: Feature columns for Wide and Deep model
Step6: Gather list of feature columns
Step7: Build a Wide and Deep model in Keras
Step8: Then, we'll define our custom RMSE evaluation metric and build our wide and deep model.
Step9: Next, we call build_model to create the model. Here we'll have two hidden layers, each with 10 neurons, for the deep part of our model. We can also use plot_model to see a diagram of the model we've created.
Step10: Next, we'll set up our training variables, create our datasets for training and validation, and train our model.
Step11: Just as before, we can examine the history to see how the RMSE changes through training on the train set and validation set.
Step12: Making predictions with our model
Step13: Export and deploy our model
Step14: Deploy our model to Vertex AI
Step15: <img src='assets/taxi_fare_keras_func_model.png' width='80%'>
Step16: Cleanup
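As a small standalone illustration of the bucketize-then-cross feature engineering used above, the sketch below builds two toy coordinate columns and crosses them; the bucket boundaries and feature names here are made-up values, not the ones from the notebook.
import tensorflow as tf
from tensorflow import feature_column as fc
lat = fc.bucketized_column(fc.numeric_column("lat"), boundaries=[40.5, 40.7, 40.9])
lon = fc.bucketized_column(fc.numeric_column("lon"), boundaries=[-74.0, -73.9, -73.8])
loc = fc.indicator_column(fc.crossed_column([lat, lon], hash_bucket_size=16))
layer = tf.keras.layers.DenseFeatures([loc])
# One-hot vector identifying which (lat bucket, lon bucket) cell the point falls into
print(layer({"lat": tf.constant([40.75]), "lon": tf.constant([-73.95])}))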
|
7,229
|
<ASSISTANT_TASK:>
Python Code:
#imports
from __future__ import division
import pandas as pd
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pylab as pl
%matplotlib inline
import seaborn as sns
#Read in data from source
df_raw = pd.read_csv("../assets/admissions.csv")
print df_raw.head()
df_raw.count()
df_raw.count().sum()
summary_stats_admissions = df_raw.describe()
summary_stats_admissions
# Compute quantiles of gre
gre_quantiles = pd.qcut(df_raw['gre'], 4)
gre_quantiles.value_counts().sort_index()
# Compute quantiles of gpa
gpa_quantiles = pd.qcut(df_raw['gpa'], 4)
gpa_quantiles.value_counts().sort_index()
# What is the sample size distribution among quantiles of gre and gpa by prestige level?
df_raw.pivot_table(['gre'], ['admit', gre_quantiles], [gpa_quantiles, 'prestige'], aggfunc=[len])
# What is the standard deviation distribution among quantiles of gre and gpa by prestige level?
df_raw.pivot_table(['gre'], ['admit', gre_quantiles], [gpa_quantiles, 'prestige'], aggfunc=[np.std])
# Inspect gre, gpa std
df_raw.std()[['gre', 'gpa']]
# Which columns have missing data?
df_raw.isnull().sum()
# Which records are null?
df_raw[df_raw.isnull().any(axis=1)]
# What is shape of dataframe before dropping records?
shape_before_dropna = df_raw.shape
print(shape_before_dropna)
# Inspect shape before dropping missing values
shape_after_dropna = df_raw.dropna(how='any').shape
print(shape_after_dropna)
# Now, drop missing values
df_raw.dropna(how='any', inplace=True)
#boxplot 1
#df_raw.boxplot('gre')
sns.boxplot('gre', data=df_raw)
sns.plt.title('GRE: Box and Whiskers Plot')
#boxplot 2
#df_raw.boxplot('gpa')
sns.boxplot('gpa', data=df_raw)
sns.plt.title('GPA: Box and Whiskers Plot')
# plot the distribution of each variable
df_raw.plot(kind='density', subplots=True, layout=(2, 2), sharex=False)
plt.show()
# Test for normality using the Kolmogorov-Smirnov Test
# GRE normal?
print('GRE: ', stats.kstest(df_raw.gre, 'norm'))
print('Kurtosis: ', df_raw.gre.kurt())
print('Skew: ', df_raw.gre.skew())
print('~~~~~~~~~~~')
# GPA normal?
print('GPA : ', stats.kstest(df_raw.gpa, 'norm'))
print('Kurtosis: ', df_raw.gpa.kurt())
print('Skew: ', df_raw.gpa.skew())
print('~~~~~~~~~~~')
# Admit normal?
print('Admit: ', stats.kstest(df_raw.admit, 'norm'))
print('Kurtosis: ', df_raw.admit.kurt())
print('Skew: ', df_raw.admit.skew())
print('~~~~~~~~~~~')
# Prestige normal?
print('Prestige: ', stats.kstest(df_raw.prestige, 'norm'))
print('Kurtosis: ', df_raw.prestige.kurt())
print('Skew: ', df_raw.prestige.skew())
# GRE IQR
q3_gre = summary_stats_admissions.gre['75%']
q1_gre = summary_stats_admissions.gre['25%']
iqr_gre = q3_gre - q1_gre
low_fence_gre = q1_gre - 1.5*iqr_gre
high_fence_gre = q3_gre + 1.5*iqr_gre
print("GRE IQR: ", iqr_gre)
print("GRE low fence: ", low_fence_gre)
print("GRE high fence: ", high_fence_gre)
# Find GRE outliers
print('Number of outliers: ', df_raw[(df_raw.gre < low_fence_gre) | (df_raw.gre > high_fence_gre)].shape[0])
print('These are the outliers: ')
df_raw[(df_raw.gre < low_fence_gre) | (df_raw.gre > high_fence_gre)]
# Remove GRE outliers
print('Shape before outlier removal is: ', df_raw.shape)
df = df_raw[(df_raw.gre >= low_fence_gre) & (df_raw.gre <= high_fence_gre)]
print('Shape after outlier removal is: ', df.shape)
# Plot to visually inspect distribution, still looks skewed
df.gre.plot.density()
plt.title('GRE density')
plt.show()
# GPA IQR
q3_gpa = summary_stats_admissions.gpa['75%']
q1_gpa = summary_stats_admissions.gpa['25%']
iqr_gpa = q3_gpa - q1_gpa
low_fence_gpa = q1_gpa - 1.5*iqr_gpa
high_fence_gpa = q3_gpa + 1.5*iqr_gpa
print("GPA IQR: ", round(iqr_gpa, 1))
print("GPA low fence: ", round(low_fence_gpa, 1))
print("GPA high fence: ", round(high_fence_gpa, 1))
# Now, find GPA Outliers
print('Number of outliers: ', df[(df.gpa < low_fence_gpa) | (df.gpa > high_fence_gpa)].shape[0])
print('These are the outliers: ')
df[(df.gpa < low_fence_gpa) | (df.gpa > high_fence_gpa)]
print('Shape before outlier removal is: ', df.shape)
df = df[(df.gpa >= low_fence_gpa) & (df.gpa <= high_fence_gpa)]
print('Shape after outlier removal is: ', df.shape)
# Plot to visually inspect distribution, still looks skewed!
df.gpa.plot.density()
plt.title('GPA density')
plt.show()
# Removed outliers: re-test for normality using the Kolmogorov-Smirnov Test
# Observation: skew got better, kurtosis got worse!
# GRE
print('GRE: ', stats.kstest(df.gre, 'norm'))
print('Kurtosis: ', df.gre.kurt())
print('Skew: ', df.gre.skew())
print('~~~~~~~~~~~')
# GPA
print('GPA : ', stats.kstest(df.gpa, 'norm'))
print('Kurtosis: ', df.gpa.kurt())
print('Skew: ', df.gpa.skew())
# Transform GRE distribution to standard normal
sns.distplot( (df.gre - df.gre.mean()) / df.gre.std(), bins=5, kde_kws={'bw':1} )
sns.plt.title('GRE to Standard Normal')
sns.plt.show()
# Transform GPA distribution to standard normal
sns.distplot( (df.gpa - df.gpa.mean()) / df.gpa.std(), bins=10, kde_kws={'bw':1} )
sns.plt.title('GPA to Standard Normal')
sns.plt.show()
# Log transform the data: re-test for normality using the Kolmogorov-Smirnov Test
# Observation: Skew got worse, Kurtosis got better
# GRE
print('GRE: ', stats.kstest(np.log(df.gre), 'norm'))
print('Kurtosis: ', np.log(df.gre).kurt())
print('Skew: ', np.log(df.gre).skew())
print('~~~~~~~~~~~')
# GPA
print('GPA : ', stats.kstest(np.log(df.gpa), 'norm'))
print('Kurtosis: ', np.log(df.gpa).kurt())
print('Skew: ', np.log(df.gpa).skew())
# create a correlation matrix for the data
df_raw.corr()
sns.heatmap(df_raw.corr(), annot=True, cmap='RdBu')
pd.scatter_matrix(df_raw)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1
Step1: Step 2
Step2: Questions
Step3: Answer
Step4: Question 3. Why would GRE have a larger STD than GPA?
Step5: Answer
Step6: Question 5. Confirm that you dropped the correct data. How can you tell?
Step7: Question 7. What do these plots show?
Step8: The Admit distribution is bimodal (has two modes, 0 and 1) as expected. Both the GRE distribution and GPA distribution are approximately symmetrical. The Prestige distribution is multimodal (has four modes, 1, 2, 3, 4) as expected.
Step9: Answer
Step10: Answer
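The outlier removal above repeats the same 1.5*IQR fencing for GRE and GPA; a reusable sketch of that rule is shown below (the helper name is my own, not from the notebook).
def iqr_fences(series):
    q1, q3 = series.quantile(0.25), series.quantile(0.75)
    iqr = q3 - q1
    return q1 - 1.5 * iqr, q3 + 1.5 * iqr
low, high = iqr_fences(df_raw['gre'])
df_gre_trimmed = df_raw[(df_raw.gre >= low) & (df_raw.gre <= high)]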
|
7,230
|
<ASSISTANT_TASK:>
Python Code:
import openpnm as op
%config InlineBackend.figure_formats = ['svg']
import matplotlib.pyplot as plt
pn = op.network.Cubic(shape=[20, 20, 20], spacing=100)
geo = op.geometry.SpheresAndCylinders(network=pn, pores=pn.Ps, throats=pn.Ts)
print(geo)
fig = plt.hist(geo['pore.diameter'], bins=25, edgecolor='k')
print(geo.models)
f = op.models.geometry.pore_size.normal
geo.add_model(propname='pore.diameter',
model=f,
loc=50, scale=10)
fig = plt.hist(geo['pore.diameter'], bins=25, edgecolor='k')
print(geo.models['pore.seed'])
geo.models['pore.seed']['num_range'] = [0.001, 0.999]
geo.regenerate_models()
fig = plt.hist(geo['pore.diameter'], bins=25, edgecolor='k')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The spacing of the above network is in um for this example to make values easier to read, but in general you should always use SI
Step2: As can be seen by printing it, there are quite a few geometrical properties already added to this object. Defining these manually would have been a pain, so it's a good idea to start with this class then alter the few models that need it
Step3: The pore size distribution on the SpheresAndCylinders geometry is probably the most likely thing to change, since by default it is random (i.e. a uniform distribution), as shown below; a sketch of swapping in yet another distribution appears after this step list.
Step4: The models on the geo object can be seen by printing them
Step5: In this tutorial we will change how pore sizes are calculated. We can do this by assigning a new pore-scale model for 'pore.diameter'. Let's use Gaussian distribution
Step6: This model is automatically run when it's assigned, so we can inspect the new pore diameter values
Step7: The above distribution does not look very much like a Gaussian distribution. This is because the 'pore.seed' values are truncated between 0.2 and 0.7
Step8: We should change this to a wider range to capture more pores on the "tails", then call regenerate_models, which will not only regenerate the random numbers, but all the other properties that depend on it such as 'pore.diameter', 'pore.volume', and so on
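Beyond the Gaussian model used above, the same add_model/regenerate_models pattern should work for other pore-size models; the sketch below assumes a Weibull pore-size model is available in this OpenPNM version, and the shape/loc/scale values are purely illustrative.
geo.add_model(propname='pore.diameter',
              model=op.models.geometry.pore_size.weibull,
              shape=1.5, loc=20, scale=30)
geo.regenerate_models()
fig = plt.hist(geo['pore.diameter'], bins=25, edgecolor='k')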
|
7,231
|
<ASSISTANT_TASK:>
Python Code:
# The main function
import karps as ks
# The standard library
import karps.functions as f
# Some tools to display the computation process:
from karps.display import show_phase
def harmonic_mean(col):
count = f.as_double(f.count(col))
inv_sum = 1.0/f.sum(1.0/col)
return inv_sum * count
# Using Pandas to evaluate our function:
import pandas as pd
pandas_df = pd.DataFrame([1.0, 2.0])
harmonic_mean(pandas_df)
# Create a HUGE dataframe
df = ks.dataframe([1.0, 2.0], name="my_input")
df
# And apply our function:
cached_df = f.autocache(df)
hmean = harmonic_mean(cached_df)
hmean
# All computations happen within a session, which keeps track of the state in Spark.
s = ks.session("demo1")
comp = s.compute(hmean)
show_phase(comp, "initial")
show_phase(comp, "MERGE_PREAGG_AGGREGATIONS")
show_phase(comp, "MERGE_AGGREGATIONS")
show_phase(comp, "final")
comp.values()
s.eval(hmean)
show_phase(comp, "parsed")
show_phase(comp, "physical")
show_phase(comp, "rdd")
comp.dump_profile("karps_trace_1.json")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here is the definition of the harmonic mean, which is a simple function. Given a column of n floating point values, it is the count n divided by the sum of the reciprocals, n / sum(1/x_i), and it is coded as such (a plain-numpy check of this formula appears after this step list).
Step2: This is exactly how one would want to code it in numpy, pandas, and using basic Spark constructs. In fact, you can run this code straight inside Pandas
Step3: This code has a number of problems if you want to use it in Spark however
Step4: Something to immediately note is that the computation is lazy
Step5: The compute function not only triggers the computation, but also provides more debugging information into what is happening. We are going to introspect the compiler passes to see how things get transformed.
Step6: Here is the initial graph of computation, as we built it. Click on the nodes to have more detailed information.
Step7: The important part to notice though is that after the count1 and the sum4 nodes, all the other nodes are observables (local values). They do not involve distributed datasets anymore, so they are very cheap to compute. The Karps compiler is going to optimize the distributed part to reduce the amount of computations, everything after that is not important for now.
Step8: Now that we only perform a single aggregation, do we still need to cache the data? We don't! The next compiler phase is going to inspect the autocache nodes, and see how many times they get to be aggregated, and remove them if possible. In this case, it correctly infers that we do not need this autocache0 operator. Here is the final graph that gets executed
Step9: More work could be done to simplify the local nodes, but this is outside the scope of this first project.
Step10: Or in short if you do not want to see what is happening
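As a quick sanity check of the formula from Step1, here is the same harmonic mean computed with plain numpy on the toy column [1.0, 2.0] used above.
import numpy as np
xs = np.array([1.0, 2.0])
hmean = len(xs) / np.sum(1.0 / xs)   # n / sum(1/x_i) = 2 / 1.5 = 4/3
print(hmean)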
|
7,232
|
<ASSISTANT_TASK:>
Python Code:
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
from utils import analysis
from utils import marketsim as msim
orders_path = '../../data/orders/orders-my-leverage.csv'
orders_df = pd.read_csv(orders_path, index_col='Date', parse_dates=True, na_values=['nan'])
orders_df
data_df = pd.read_pickle('../../data/data_df.pkl')
port_vals_df, values = msim.simulate_orders(orders_df, data_df)
port_vals_df.plot()
values
analysis.value_eval(port_vals_df, graph=True, verbose=True, data_df=data_df)
'AAPL' in data_df.columns.tolist()
data_df.index.get_level_values(0)[0]
symbols = data_df.columns.get_level_values(0).tolist()
symbols.append('CASH')
positions_df = pd.DataFrame(index=symbols, columns=['shares', 'value'])
positions_df
close_df = data_df.xs('Close', level='feature')
close_df.head()
current_date = close_df.index[-1]
current_date
positions_df['shares'] = np.zeros(positions_df.shape[0])
positions_df.loc['CASH','shares'] = 1000
positions_df
SHARES = 'shares'
VALUE = 'value'
CASH = 'CASH'
prices = close_df.loc[current_date]
prices[CASH] = 1.0
positions_df[VALUE] = positions_df[SHARES] * prices
positions_df
ORDER_SYMBOL = 'symbol'
ORDER_ORDER = 'order'
ORDER_SHARES = 'shares'
BUY = 'BUY'
SELL = 'SELL'
NOTHING = 'NOTHING'
order = pd.Series(['AAPL', BUY, 200], index=[ORDER_SYMBOL, ORDER_ORDER, ORDER_SHARES])
order
if order[ORDER_ORDER] == 'BUY':
positions_df.loc[order[ORDER_SYMBOL], SHARES] += order[ORDER_SHARES]
positions_df.loc[CASH, SHARES] -= order[ORDER_SHARES] * close_df.loc[current_date, order[ORDER_SYMBOL]]
if order[ORDER_ORDER] == 'SELL':
positions_df.loc[order[ORDER_SYMBOL], SHARES] -= order[ORDER_SHARES]
positions_df.loc[CASH, SHARES] += order[ORDER_SHARES] * close_df.loc[current_date, order[ORDER_SYMBOL]]
positions_df[VALUE] = positions_df[SHARES] * prices
positions_df.loc['AAPL']
positions_df.loc[CASH]
close_df.loc[current_date, 'AAPL']
116*200
positions_df[VALUE].iloc[:-1]
values = positions_df[VALUE]
leverage = np.sum(np.abs(values.iloc[:-1])) / (np.sum(values))
leverage
from recommender.portfolio import Portfolio
p = Portfolio(data_df)
from recommender.order import Order
o1 = Order(['AAPL', BUY, 150])
print(o1)
p.positions_df
p.positions_df.loc['AAPL']
p.execute_order(o1)
p.positions_df.loc[['AAPL','CASH']]
p.add_market_days(1)
p.current_date
p.positions_df.loc[['AAPL', CASH]]
p.add_market_days(1)
p.current_date
p.positions_df.loc[['AAPL', CASH]]
p.positions_df[VALUE].sum()
p.execute_order(Order(['AAPL',SELL,100]))
p.positions_df[p.positions_df[SHARES] != 0]
p.execute_order(Order(['MSFT',BUY,120]))
p.get_positions()
p.leverage_limit = 2
p.execute_order(Order(['AAPL',BUY, 10]))
p.get_positions()
p.execute_order(Order(['AAPL',BUY, 5000]))
p.get_positions()
p.execute_order(Order(['AAPL',SELL, 300]))
p.get_positions()
p.execute_order(Order(['AAPL',SELL, 3000]))
p.get_positions()
pos = p.get_positions()
pos[VALUE].sum()
p.add_market_days(1000)
p.get_positions()
p.add_market_days(6000)
p.get_positions()
p.get_positions()[VALUE].sum()
p.add_market_days(-7000) # Back in time...
p.get_positions()
p.current_date
p.close_df.loc[p.current_date, 'GOOG']
p.execute_order(Order(['GOOG', BUY, 100]))
p.get_positions()
# I need to add some cash, because I lost a lot of money shorting AAPL in the last 20 years, and I need to meet the leverage limits.
p.positions_df.loc[CASH, SHARES] = 100000
p.update_values()
p.add_market_days(7200)
p.execute_order(Order(['GOOG', BUY, 100]))
p.get_positions()
p.leverage_limit
p.my_leverage_reached()
p.get_leverage()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To use the market simulator with the q-learning agent it must be possible to call it with custom data, stored in RAM. Let's try that.
Step2: That function has many of the desired characteristics, but doesn't follow the dynamics necessary for the interaction with the agent. The solution will be to implement a new class, called Portfolio, that will accept orders, keep track of the positions and return their values when asked for.
Step3: Let's test the Portfolio class
Step4: Let's add a leverage limit of 2
Step5: Let's buy less than the limit
Step6: Now, let's buy more than the limit
Step7: The last order wasn't executed because the leverage limit was reached. That's good.
Step8: Now, the same, but this time let's pass the limit.
Step9: Nothing happened because the leverage limit was reached. That's ok.
Step10: Let's try to buy GOOG before it entered the market...
Step11: Ok, nothing happened. That's correct.
Step12: Good. This time GOOG was bought!
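For reference, the leverage that gates these orders is the sum of the absolute non-cash position values divided by the total portfolio value (cash included); the numbers below are illustrative only.
import numpy as np
position_values = np.array([20000.0, -5000.0])   # long and short positions, excluding cash
cash = 10000.0
leverage = np.abs(position_values).sum() / (position_values.sum() + cash)
print(leverage)   # 1.0 here; orders are rejected once this would exceed the leverage limit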
|
7,233
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
plt.scatter(np.random.randn(100), np.random.randn(100), c='g', s=50, marker='+', alpha=0.7)
plt.xlabel('Random x values')
plt.ylabel('Random y values')
plt.title('Randomness Fun!')
plt.hist(np.random.randn(100), bins=5, log=True, orientation='horizontal')
plt.xlabel('Logarithmic Probability')
plt.ylabel('Random Number')
plt.title('Probability of Random Numbers')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Scatter plots
Step2: Histogram
|
7,234
|
<ASSISTANT_TASK:>
Python Code:
# Load pickled data
import pickle
import cv2 # for grayscale and normalize
# TODO: Fill this in based on where you saved the training and testing data
training_file ='traffic-signs-data/train.p'
validation_file='traffic-signs-data/valid.p'
testing_file = 'traffic-signs-data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_trainLd, y_trainLd = train['features'], train['labels']
X_validLd, y_validLd = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
#X_trainLd=X_trainLd.astype(float)
#y_trainLd=y_trainLd.astype(float)
#X_validLd=X_validLd.astype(float)
#y_validLd=y_validLd.astype(float)
print("Xtrain shape : "+str(X_trainLd.shape)+" ytrain shape : "+str(y_trainLd.shape))
print("Xvalid shape : "+str(X_validLd.shape)+" yvalid shape : "+str(y_validLd.shape))
print("X_test shape : "+str(X_test.shape)+" y_test shape : "+str(y_test.shape))
from sklearn.model_selection import train_test_split
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
import numpy as np
# TODO: Number of training examples
n_train = X_trainLd.shape[0]
# TODO: Number of validation examples
n_validation = X_validLd.shape[0]
# TODO: Number of testing examples.
n_test = X_test.shape[0]
# TODO: What's the shape of an traffic sign image?
image_shape = X_trainLd.shape[1:4]
# TODO: How many unique classes/labels there are in the dataset.
# The number of classes is the count of distinct labels (43 for this dataset)
n_classes = len(np.unique(y_trainLd))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
import random
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
index = random.randint(0, len(X_trainLd))
image = X_trainLd[100] #squeeze : Remove single-dimensional entries from the shape of an array.
image = image.astype(float)
#normalise
def normit(img):
    # Scale pixel values from [0, 255] to roughly [-1, 1]
    image = img.astype(float)
    norm = (image - 128.0) / 128.0
    return norm
temp = normit(image)
plt.figure(figsize=(1,1))
plt.imshow(temp.squeeze())
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
import cv2
from sklearn.utils import shuffle
print("Test")
## xtrain
grey_X_train = np.zeros(shape=[X_trainLd.shape[0],X_trainLd.shape[1],X_trainLd.shape[2]])
norm_X_train = np.zeros(shape=[X_trainLd.shape[0],X_trainLd.shape[1],X_trainLd.shape[2],3])
norm_X_train = norm_X_train.astype(float)
X_train, y_train = shuffle(X_trainLd, y_trainLd)
shuff_X_train, shuff_y_train =X_train, y_train
X_valid, y_valid = X_validLd, y_validLd
i=0
for p in X_train:
t = normit(p)
norm_X_train[i] = t
i=i+1
print("after normalise")
##validate
norm_X_valid = np.zeros(shape=[X_validLd.shape[0],X_validLd.shape[1],X_validLd.shape[2],3])
norm_X_valid=norm_X_valid.astype(float)
i=0
for v in X_valid:
tv = normit(v)
#tempv = tv.reshape(32,32,1)
norm_X_valid[i] = tv
i=i+1
##test
norm_X_test=[]
norm_X_test = np.zeros(shape=[X_test.shape[0],X_test.shape[1],X_test.shape[2],3])
norm_X_test=norm_X_test.astype(float)
i=0
for testim in X_test:
tt = normit(testim)
norm_X_test[i] = tt
i=i+1
print("fin")
image22 = norm_X_train[110] ; imageb4 = X_train[110]; imagev=norm_X_valid[100]; imaget=norm_X_test[100]
plt.figure(figsize=(1,1))
plt.imshow(imagev.squeeze())
plt.figure(figsize=(1,1))
plt.imshow(imaget.squeeze()) #squeeze : Remove single-dimensional entries from the shape of an array
### Define your architecture here.
### Feel free to use as many code cells as needed.
import tensorflow as tf
EPOCHS = 30
BATCH_SIZE = 128 #SMcM change to 256 from 128
#X_train=X_train.astype(float)
X_train=norm_X_train
#print(X_train[20])
#X_train=shuff_X_train
#X_valid=norm_X_valid
from tensorflow.contrib.layers import flatten
def LeNet(x):
# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
mu = 0.0
sigma = 0.1 #SMcM changed from 0.1 to 0.2
# SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5,3, 6), mean = mu, stddev = sigma)) #SMcM depth cahnged to 3
conv1_b = tf.Variable(tf.zeros(6))
conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b #try same should be better (padding)
# SOLUTION: Activation.
conv1 = tf.nn.relu(conv1)
#conv1 = tf.nn.relu(conv1) #SMcM add an extra relu
# SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
conv2_b = tf.Variable(tf.zeros(16))
conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
# SOLUTION: Activation.
conv2 = tf.nn.relu(conv2)
# SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# SOLUTION: Flatten. Input = 5x5x16. Output = 400.
fc0 = flatten(conv2)
# SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
fc1_b = tf.Variable(tf.zeros(120))
fc1 = tf.matmul(fc0, fc1_W) + fc1_b
# SOLUTION: Activation.
fc1 = tf.nn.relu(fc1)
# SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
fc2_b = tf.Variable(tf.zeros(84))
fc2 = tf.matmul(fc1, fc2_W) + fc2_b
# SOLUTION: Activation.
fc2 = tf.nn.relu(fc2)
# SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 43.
fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
fc3_b = tf.Variable(tf.zeros(43))
logits = tf.matmul(fc2, fc3_W) + fc3_b
return logits
print("model")
image22 = X_train[110] #squeeze : Remove single-dimensional entries from the shape of an array
print(norm_X_train.shape)
print(X_train.shape)
plt.figure(figsize=(1,1))
plt.imshow(image22.squeeze())
#print(image22)
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
#Features and Labels
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
print("start")
#Training Pipeline
rate = 0.0025 # SMCM decreased rate to .0008 from 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
#Model Evaluation
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
#Train the Model
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
validation_accuracy = evaluate(norm_X_valid, y_valid)
print("EPOCH {} ...".format(i+1))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
saver.save(sess, './sign')
print("Model saved")
#evaluate the model
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
print("restored")
test_accuracy = evaluate(norm_X_test, y_test)
print("Test Accuracy = {:.3f}".format(test_accuracy))
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
#http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset
#http://benchmark.ini.rub.de/Dataset/GTSRB_Online-Test-Images.zip
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# Here make sure to preprocess your image_input in a way your network expects
# with size, normalization, ect if needed
# image_input =
# Note: x should be the same name as your network's tensorflow data placeholder variable
# If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
featuremaps = activation.shape[3]
plt.figure(plt_num, figsize=(15,15))
for featuremap in range(featuremaps):
plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
if activation_min != -1 & activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
elif activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
elif activation_min !=-1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
else:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Include an exploratory visualization of the dataset
Step3: Step 2
Step4: Model Architecture
Step5: A validation set can be used to assess how well the model is performing. A low accuracy on both the training and validation sets implies underfitting, while a high training accuracy but a low validation accuracy implies overfitting.
Step6: Evaluate the Model
Step7: Step 3
Step8: Predict the Sign Type for Each Image
Step9: Analyze Performance
Step10: Output Top 5 Softmax Probabilities For Each Image Found on the Web
Step11: Project Writeup
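The preprocessing step above mentions grayscale conversion as an option, although the final model keeps all 3 colour channels; purely as an illustration, a single image could be converted and normalised like this (assuming the X_trainLd array and cv2 import from the cells above).
gray = cv2.cvtColor(X_trainLd[0], cv2.COLOR_RGB2GRAY)      # 32x32x3 uint8 -> 32x32 grayscale
gray_norm = (gray.astype(float) - 128.0) / 128.0           # same (x - 128) / 128 scaling as normit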
|
7,235
|
<ASSISTANT_TASK:>
Python Code:
!pipeline --help
!pipeline init --ip 127.0.0.1 --port 9380
from pipeline.backend.pipeline import PipeLine
pipeline_upload = PipeLine().set_initiator(role='guest', party_id=9999).set_roles(guest=9999)
partition = 4
dense_data_guest = {"name": "breast_hetero_guest", "namespace": f"experiment"}
dense_data_host = {"name": "breast_hetero_host", "namespace": f"experiment"}
tag_data = {"name": "breast_hetero_host", "namespace": f"experiment"}
data_base = "/workspace/FATE/"
pipeline_upload.add_upload_data(file=os.path.join(data_base, "examples/data/breast_hetero_guest.csv"),
table_name=dense_data_guest["name"], # table name
namespace=dense_data_guest["namespace"], # namespace
head=1, partition=partition, # data info
extend_sid=True, # extend sid
auto_increasing_sid=False)
pipeline_upload.add_upload_data(file=os.path.join(data_base, "examples/data/breast_hetero_host.csv"),
table_name=dense_data_host["name"],
namespace=dense_data_host["namespace"],
head=1, partition=partition,
extend_sid=True,
auto_increasing_sid=False)
pipeline_upload.upload(drop=1)
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, Intersection, HeteroSecureBoost, Evaluation
from pipeline.interface import Data
pipeline = PipeLine() \
.set_initiator(role='guest', party_id=9999) \
.set_roles(guest=9999, host=10000)
reader_0 = Reader(name="reader_0")
# set guest parameter
reader_0.get_party_instance(role='guest', party_id=9999).component_param(
table={"name": "breast_hetero_guest", "namespace": "experiment"})
# set host parameter
reader_0.get_party_instance(role='host', party_id=10000).component_param(
table={"name": "breast_hetero_host", "namespace": "experiment"})
# set with match id
data_transform_0 = DataTransform(name="data_transform_0", with_match_id=True)
# set guest parameter
data_transform_0.get_party_instance(role='guest', party_id=9999).component_param(
with_label=True)
data_transform_0.get_party_instance(role='host', party_id=[10000]).component_param(
with_label=False)
intersect_0 = Intersection(name="intersect_0")
hetero_secureboost_0 = HeteroSecureBoost(name="hetero_secureboost_0",
num_trees=5,
bin_num=16,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "paillier"},
tree_param={"max_depth": 3})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_secureboost_0, data=Data(train_data=intersect_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secureboost_0.output.data))
pipeline.compile();
pipeline.fit()
import json
print(json.dumps(pipeline.get_component("data_transform_0").get_output_data(limits=3), indent=4))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Assume we have a FATE Flow Service in 127.0.0.1
Step2: upload data
Step3: Make a pipeline instance
Step4: Define partitions for data storage
Step5: Define table name and namespace, which will be used in FATE job configuration
Step6: Now, we add data to be uploaded. To have a generated UUID used as the sid, turn on the extend_sid option. Alternatively, set auto_increasing_sid to make the extended sid start at 0.
Step7: We can then upload data
Step8: After uploading, we can then start modeling. Here we build a Hetero SecureBoost model the same way as in this demo, but note how the specification of the DataTransform module needs to be adjusted to correctly load in the match id.
Step9: Add components to pipeline, in order of execution
Step10: Now, submit(fit) our pipeline
Step11: Check the data output on FATEBoard, or download the component output data, to see that each data instance now has a uuid as its sid.
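As a side note (not part of the original job configuration): if instead you want the extended sid to start at 0 and auto-increment rather than be a uuid, the upload call above could be changed roughly as follows; the parameter names are the ones already used in the code above, and the exact behaviour depends on the FATE version.
pipeline_upload.add_upload_data(file=os.path.join(data_base, "examples/data/breast_hetero_guest.csv"),
                                table_name=dense_data_guest["name"],
                                namespace=dense_data_guest["namespace"],
                                head=1, partition=partition,
                                extend_sid=True,
                                auto_increasing_sid=True)  # auto-increment sid from 0 instead of generating uuids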
|
7,236
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import antipackage
import github.ellisonbg.misc.vizarray as va
def checkerboard(size):
    """Return a 2d checkerboard of 0.0 and 1.0 as a NumPy array."""
the_checkerboard=np.zeros((size,size), dtype=float)
for i in range(size):
for q in range(size):
if i%2 == 1 and q%2==1:
the_checkerboard[i,q] = 1.0
elif q%2 == 0 and i%2==0:
the_checkerboard[i,q] = 1.0
return(the_checkerboard)
a = checkerboard(4)
assert a[0,0]==1.0
assert a.sum()==8.0
assert a.dtype==np.dtype(float)
assert np.all(a[0,0:5:2]==1.0)
assert np.all(a[1,0:5:2]==0.0)
b = checkerboard(5)
assert b[0,0]==1.0
assert b.sum()==13.0
assert np.all(b.ravel()[0:26:2]==1.0)
assert np.all(b.ravel()[1:25:2]==0.0)
va.set_block_size(10)
va.vizarray(checkerboard(20))
assert True
va.set_block_size(5)
va.vizarray(checkerboard(27))
assert True
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Checkerboard
Step3: Use vizarray to visualize a checkerboard of size=20 with a block size of 10px.
Step4: Use vizarray to visualize a checkerboard of size=27 with a block size of 5px.
|
7,237
|
<ASSISTANT_TASK:>
Python Code:
arr = io.imread('0mm_cam0.tif')
print 'Image has been loaded as a 2d numpy array with ', arr.shape, 'rows and columns. Datatype =', arr.dtype
io.implot('0mm_cam0.tif')
cd ../particle_images/
io.implot('TomoImg_cam0_a00001.tif', cmap='jet')
io.imsave('raw_image_data.txt', arr)
import scipy as sp
sparse_arr = sp.sparse.coo_matrix(arr)
print sparse_arr
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting images
Step2: One can also use different matplotlib colormaps while plotting the images as demonstrated below.
Step3: Saving arrays as text files/images
Step4: The text file would contain many zeroes and few non-zero integers indicating the intensities at each pixel
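A possible follow-up (output filename assumed for illustration): since most entries are zero, the sparse representation can be written to disk far more compactly than the dense text dump.
import scipy.sparse
# convert the COO matrix to CSR and save it in scipy's compressed .npz format
scipy.sparse.save_npz('raw_image_data_sparse.npz', sparse_arr.tocsr())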
|
7,238
|
<ASSISTANT_TASK:>
Python Code:
#Importamos las librerías utilizadas
import numpy as np
import pandas as pd
import seaborn as sns
#Mostramos las versiones usadas de cada librerías
print ("Numpy v{}".format(np.__version__))
print ("Pandas v{}".format(pd.__version__))
print ("Seaborn v{}".format(sns.__version__))
#Mostramos todos los gráficos en el notebook
%pylab inline
#Abrimos el fichero csv con los datos de la muestra
datos = pd.read_csv('datos.csv')
#Almacenamos en una lista las columnas del fichero con las que vamos a trabajar
#columns = ['temperatura', 'entrada']
columns = ['temperatura', 'entrada']
#Mostramos en varias gráficas la información obtenida tras el ensayo
ax = datos[columns].plot(secondary_y=['entrada'],figsize=(10,5), ylim=(20,60),title='Modelo matemático del sistema')
ax.set_xlabel('Tiempo')
ax.set_ylabel('Temperatura [ºC]')
#datos_filtrados['RPM TRAC'].plot(secondary_y=True,style='g',figsize=(20,20)).set_ylabel=('RPM')
# Find the degree-2 polynomial that describes the distribution of the data
reg = np.polyfit(datos['time'],datos['temperatura'],2)
# Calculamos los valores de y con la regresión
ry = np.polyval(reg,datos['time'])
print (reg)
plt.plot(datos['time'],datos['temperatura'],'b^', label=('Datos experimentales'))
plt.plot(datos['time'],ry,'ro', label=('regresión polinómica'))
plt.legend(loc=0)
plt.grid(True)
plt.xlabel('Tiempo')
plt.ylabel('Temperatura [ºC]')
#Almacenamos en una lista las columnas del fichero con las que vamos a trabajar
datos_it1 = pd.read_csv('Regulador1.csv')
columns = ['temperatura']
#Mostramos en varias gráficas la información obtenida tras el ensayo
ax = datos_it1[columns].plot(figsize=(10,5), ylim=(20,100),title='Modelo matemático del sistema con regulador',)
ax.set_xlabel('Tiempo')
ax.set_ylabel('Temperatura [ºC]')
ax.hlines([80],0,3500,colors='r')
#Calculamos MP
Tmax = datos_it1.describe().loc['max','temperatura'] #Valor de la Temperatura maxima en el ensayo
print (" {:.2f}").format(Tmax)
Sp=80.0 #Valor del setpoint
Mp= ((Tmax-Sp)/(Sp))*100
print("El valor de sobreoscilación es de: {:.2f}%".format(Mp))
#Calculamos el Error en régimen permanente
Errp = datos_it1.describe().loc['75%','temperatura'] #Valor de la temperatura en régimen permanente
Eregimen = abs(Sp-Errp)
print("El valor del error en régimen permanente es de: {:.2f}".format(Eregimen))
#Almacenamos en una lista las columnas del fichero con las que vamos a trabajar
datos_it2 = pd.read_csv('Regulador2.csv')
columns = ['temperatura']
#Mostramos en varias gráficas la información obtenida tras el ensayo
ax2 = datos_it2[columns].plot(figsize=(10,5), ylim=(20,100),title='Modelo matemático del sistema con regulador',)
ax2.set_xlabel('Tiempo')
ax2.set_ylabel('Temperatura [ºC]')
ax2.hlines([80],0,3500,colors='r')
#Calculamos MP
Tmax = datos_it2.describe().loc['max','temperatura'] #Valor de la Temperatura maxima en el ensayo
Sp=80.0 #Valor del setpoint
Mp= ((Tmax-Sp)/(Sp))*100
print("El valor de sobreoscilación es de: {:.2f}%".format(Mp))
#Calculamos el Error en régimen permanente
Errp = datos_it2.describe().loc['75%','temperatura'] #Valor de la temperatura en régimen permanente
Eregimen = abs(Sp-Errp)
print("El valor del error en régimen permanente es de: {:.2f}".format(Eregimen))
#Almacenamos en una lista las columnas del fichero con las que vamos a trabajar
datos_it3 = pd.read_csv('Regulador3.csv')
columns = ['temperatura']
#Mostramos en varias gráficas la información obtenida tras el ensayo
ax3 = datos_it3[columns].plot(figsize=(10,5), ylim=(20,180),title='Modelo matemático del sistema con regulador',)
ax3.set_xlabel('Tiempo')
ax3.set_ylabel('Temperatura [ºC]')
ax3.hlines([160],0,6000,colors='r')
#Calculamos MP
Tmax = datos_it3.describe().loc['max','temperatura'] #Valor de la Temperatura maxima en el ensayo
Sp=160.0 #Valor del setpoint
Mp= ((Tmax-Sp)/(Sp))*100
print("El valor de sobreoscilación es de: {:.2f}%".format(Mp))
#Calculamos el Error en régimen permanente
Errp = datos_it3.describe().loc['75%','temperatura'] #Valor de la temperatura en régimen permanente
Eregimen = abs(Sp-Errp)
print("El valor del error en régimen permanente es de: {:.2f}".format(Eregimen))
#Almacenamos en una lista las columnas del fichero con las que vamos a trabajar
datos_it4 = pd.read_csv('Regulador4.csv')
columns = ['temperatura']
#Mostramos en varias gráficas la información obtenida tras el ensayo
ax4 = datos_it4[columns].plot(figsize=(10,5), ylim=(20,180),title='Modelo matemático del sistema con regulador',)
ax4.set_xlabel('Tiempo')
ax4.set_ylabel('Temperatura [ºC]')
ax4.hlines([160],0,7000,colors='r')
#Calculamos MP
Tmax = datos_it4.describe().loc['max','temperatura'] #Valor de la Temperatura maxima en el ensayo
Sp=160.0 #Valor del setpoint
Mp= ((Tmax-Sp)/(Sp))*100
print("El valor de sobreoscilación es de: {:.2f}%".format(Mp))
#Calculamos el Error en régimen permanente
Errp = datos_it4.describe().loc['75%','temperatura'] #Valor de la temperatura en régimen permanente
Eregimen = abs(Sp-Errp)
print("El valor del error en régimen permanente es de: {:.2f}".format(Eregimen))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: System response
Step2: Computing the polynomial fit
Step3: The characteristic polynomial of our system is
Step4: In this case we set a setpoint of 80 °C. As can be seen, once the controller is introduced the temperature tends to stabilise, but with a lot of overshoot. We therefore increase the values of $K_i$ and $K_d$; the values for this second iteration are the following (the reference formulas are given after these steps)
Step5: In this second iteration we managed to reduce the initial overshoot, but the steady-state error is larger. We therefore increase the values of $K_i$ and $K_d$ again; the values for this third iteration are the following
Step6: In this case a setpoint of 160 °C was used. The initial overshoot has decreased compared with the previous iteration and the steady-state error is smaller. To try to minimise the error we will increase only the value of $K_i$; the values for this fourth iteration of the controller are the following
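For reference (standard textbook definitions, not taken from the notebook itself): the PID control law is $u(t) = K_p e(t) + K_i \int_0^t e(\tau)\,d\tau + K_d \frac{de(t)}{dt}$, while the overshoot computed in the code above is $M_p = \frac{T_{max} - S_p}{S_p}\cdot 100\%$ and the steady-state error is approximated as $|S_p - T_{ss}|$.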
|
7,239
|
<ASSISTANT_TASK:>
Python Code:
datafolder = "data"
import os
has_soi = sum([name.endswith("soi.dat") for name in os.listdir(datafolder)])
has_recruit = sum([name.endswith("recruit.dat") for name in os.listdir(datafolder)])
if (has_soi and has_recruit):
print 'You are ready to go'
else:
print 'Your current directory is:'
print os.getcwd()
print 'And found the following files in the "{}" directory:'.format(datafolder)
print os.listdir(datafolder)
print ''
if not has_soi:
print 'You are missing soi.dat'
if not has_recruit:
print 'You are missing recruit.dat'
assert (has_soi and has_recruit)
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
matplotlib.style.use('ggplot')
#%load_ext autoreload
#%autoreload 1
#%aimport bdranalytics
import bdranalytics
import pandas as pd
import numpy as np
import scipy as sc
import seaborn as sns
from scipy.ndimage.interpolation import shift
import sklearn
from sklearn import linear_model, model_selection
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.linear_model import Ridge, ElasticNet
from sklearn.feature_selection import RFE, RFECV
from sklearn.metrics import mean_squared_error
import itertools
from sklearn.metrics import make_scorer, r2_score
from sklearn.metrics.scorer import r2_scorer, mean_squared_error_scorer
import statsmodels
import statsmodels.tsa.api as sm
from bdranalytics.model_selection.growingwindow import GrowingWindow
from bdranalytics.pandaspipeline.transformers import PdFeatureChain, PdFeatureUnion, PdWindowTransformer, PdLagTransformer
from IPython.display import display
import IPython
print "IPython version: {}".format(IPython.__version__)
print "statsmodels: {}".format(statsmodels.__version__)
print "numpy: {}".format(np.__version__)
print "scipy: {}".format(sc.__version__)
print "sklearn: {}".format(sklearn.__version__)
print "pandas: {}".format(pd.__version__)
X_orig = pd.read_csv(os.path.join(datafolder, "soi.dat"), header=0, names=["soi"])
rng=pd.date_range('1/1/1866', periods=X_orig.size, freq='MS')
X_orig = X_orig.set_index(rng)
y_orig = pd.read_csv(os.path.join(datafolder, "recruit.dat"), header=0, names=["recruit"]).set_index(rng).iloc[:,0]
print "The soi dataset is used as features, and is {} rows by {} columns".format(X_orig.shape[0], X_orig.shape[1])
print "The recruit dataset holds the target value, and is a series of {} rows".format(y_orig.shape[0])
print "The first few rows, combining the features with the target, looks as follows:"
print X_orig.join(y_orig).head()
print "Some quantile statistics about the range of values:"
print X_orig.join(y_orig).describe()
fig, ax = plt.subplots(figsize=(17, 5), ncols=1, nrows=2)
ax[0].set_title("soi (the feature)")
fig1 = sns.tsplot(X_orig.soi, ax=ax[0])
ax[1].set_title("recruit (the target)")
fig2 = sns.tsplot(y_orig, ax=ax[1])
y = y_orig.shift(-1).dropna() # the next recruit
X = pd.concat([X_orig, # the original features: the current soi
y_orig.to_frame() # the current recruit
], axis=1, join_axes=[X_orig.index]).loc[y.index,:]
window_transformers = PdFeatureUnion([
('window{}'.format(window), PdWindowTransformer(lambda x: x.mean(), window=window)) for window in range(1, 12)
])
lag_transformers = PdFeatureUnion([
('lag{}'.format(lag), PdLagTransformer(lag)) for lag in range(20)])
new_features = [
('chain{}'.format(window),
PdFeatureChain([
('window{}'.format(window), PdWindowTransformer(lambda x: x.mean(), window=window)),
('lags', PdFeatureUnion([('lag{}'.format(lag), PdLagTransformer(lag)) for lag in range(window, 20, window)]))
])
)
for window in range(2, 12, 2)]
combined_features = PdFeatureUnion(
[('window{}'.format(window), PdWindowTransformer(lambda x: x.mean(), window=window)) for window in range(1,12)]
+
[('lag{}'.format(lag), PdLagTransformer(lag)) for lag in range(20)]
+ new_features
)
X = combined_features.fit_transform(X).dropna()
y = y[X.index] # because of dropped rows in X, need to also select corresponding remaining rows from y
model_score = mean_squared_error
model_scorer = make_scorer(mean_squared_error, greater_is_better=False)
def cross_val(estimator, X, y, scorer = model_scorer, cv_count=10):
return model_selection.cross_val_score(estimator, X, y.to_frame(),
scoring = scorer,
cv=GrowingWindow(cv_count))
## for different folds, trains the model, en returns the error on the **train** sets.
def cross_val_train(estimator, X, y, scorer = model_scorer, cv_count=10):
return [scorer(estimator.fit(X.iloc[train,:], y.iloc[train]),
X.iloc[train,:],
y.iloc[train])
for train, test in GrowingWindow(cv_count).split(X)]
i_train, i_test = list(itertools.islice(GrowingWindow(8).split(X), 6, 7))[0]
X_train = X.iloc[i_train,:]
y_train = y[i_train]
X_test = X.iloc[i_test,:]
y_test = y[i_test]
print "Train datasize dimensions = {}, Test datasets dimensions= {} ".format(X_train.shape, X_test.shape)
print "The names of the available columns:"
display(X_train.columns)
print "The first few training rows (and only a few columns), including the target variable {}".format(y_train.name)
display(y_train.to_frame().join(X_train).iloc[:,0:6].head())
linear_regression = Pipeline([
("lm", linear_model.LinearRegression())
])
print "num features:{}".format(X_orig.shape[1])
print "Scores (higher is better);"
print "cv train:\t{}".format(np.mean(cross_val_train(linear_regression, X_orig.loc[X_train.index,:], y_train, cv_count=10)))
print "cv test:\t{}".format(np.mean(cross_val(linear_regression, X_orig.loc[X_train.index,:], y_train, cv_count=10)))
print "num features:{}".format(X.shape[1])
print "Scores (higher is better);"
print "cv train:\t{}".format(np.mean(cross_val_train(linear_regression, X, y, cv_count=10)))
print "cv test:\t{}".format(np.mean(cross_val(linear_regression, X, y, cv_count=10)))
alternative = Pipeline([
("ridge", Ridge(alpha=1.0))
])
print "num features:\t{}".format(X.shape[1])
print "Scores (higher is better);"
print "cv train:\t{}".format(np.mean(cross_val_train(alternative, X_train, y_train, cv_count=10)))
print "cv test:\t{}".format(np.mean(cross_val(alternative, X_train, y_train, cv_count=10)))
param_grid={'alpha':np.power(1.5, range(-8,4))}
display(param_grid)
ridge_cv = GridSearchCV(estimator = Ridge(),
param_grid=param_grid,
scoring=model_scorer,
n_jobs=1,
cv=GrowingWindow(4), # Usually useful to select a slightly different cv set
verbose=1).fit(X_train, y_train)
print "params:\t\t{}".format(ridge_cv.best_params_)
print "num features:\t{}".format(X.shape[1])
print "Scores (higher is better);"
print "cv train:\t{}".format(np.mean(cross_val_train(ridge_cv.best_estimator_, X_train, y_train, cv_count=10)))
print "cv test:\t{}".format(np.mean(cross_val(ridge_cv.best_estimator_, X_train, y_train, cv_count=10)))
model_best = Ridge(**(ridge_cv.best_params_))
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.plot_acf(y_orig.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.plot_pacf(y_orig.squeeze(), lags=40, ax=ax2)
rfe = RFECV(model_best, step=5, scoring = model_scorer, cv=GrowingWindow(6))
print "Scores (higher is better) (not this selects features per fold);"
print "cv train:\t{}".format(np.mean(cross_val_train(rfe, X_train, y_train, cv_count=10)))
print "cv test:\t{}".format(np.mean(cross_val(rfe, X_train, y_train, cv_count=10)))
rfe_fit = rfe.fit(X_train, y_train)
print rfe_fit.n_features_
print "As an example, the last remaining features were:"
X_train.loc[:, rfe_fit.ranking_<=1].head()
rfe_all = [np.mean(cross_val(
model_best,
X_train.loc[:, rfe_fit.ranking_<=i],
y_train,
cv_count=3))
for i in range(1, max(rfe_fit.ranking_))]
best_index = np.array(rfe_all).argsort()[::-1][0]
column_mask = rfe_fit.ranking_<=(best_index+1)
X_sub_train = X_train.loc[:, column_mask]
X_sub_test = X_test.loc[:, column_mask]
print 'Best index = {}'.format(best_index)
print 'Best nr of features = {}'.format(sum(column_mask))
print 'Which gives score = {}'.format(rfe_all[best_index])
print 'Column names = {}'.format(X_train.columns[column_mask].values)
print "Scores (higher is better);"
print "cv train:\t{}".format(np.mean(cross_val_train(model_best, X_sub_train, y_train, cv_count=10)))
print "cv test:\t{}".format(np.mean(cross_val(model_best, X_sub_train, y_train, cv_count=10)))
def final_describe(model, train, test):
    """
    Evaluates a model on the final test set. For comparison, also (cv) measurements are given about the train set.

    model is the pandas pipeline that is evaluated
    train is the train dataset (with more/less features)
    test is the test dataset (with the same features as x_train)
    """
# first some cross validation measurements
print "cross validated (within train set)"
print "cv train:\t{}".format(np.mean(cross_val_train(model, train, y_train, cv_count=10)))
print "cv test:\t{}".format(np.mean(cross_val(model, train, y_train, cv_count=10)))
# first fit the model on the FULL train set
fit = model.fit(train, y_train)
test_predictions = fit.predict(test)
print 'full train:\t{}'.format(-model_score(y_train, fit.predict(train)))
print "After fitting on full train set, evaluating on holdout set:"
print 'Holdout test:\t{}'.format(-model_score(y_test, test_predictions))
print 'Holdout r2:\t{}'.format(r2_score(y_test, test_predictions))
model_best
final_describe(model_best, X_sub_train, X_sub_test)
fit = model_best.fit(X_sub_train, y_train)
test_predictions = fit.predict(X_sub_test)
result = pd.DataFrame({"y_pred":test_predictions, "y_real":y_test})
result.plot()
final_describe(model_best, X_train, X_test)
final_describe(alternative, X_train, X_test)
final_describe(linear_regression, X_train, X_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Determining whether the folder holds the expected data files
Step2: And telling you if the folder is correct
Step3: Imports
Step4: 2 - Data load
Step5: Some information about the data set
Step6: Let us now show how the timeseries look through time
Step7: Defining the target variable
Step8: Defining the base features
Step9: 3 - Feature generation
Step10: Selecting useful rows
Step11: PS: you might think, are we allowed to do fit_transform on the complete dataset?! Yes we are: there is no information leakage, because these lag/window transformers learn nothing from the data and each row's features only use values from that time step or earlier (see the short pandas sketch after these steps).
Step12: Selecting a hold out set
Step13: Note that _window1 is actually just the value. Thus soi_window1 is just the soi feature.
Step14: The quality of our first naive model
Step15: Probably you do much better with your new set of features.
Step16: Choosing a different model
Step17: Model tuning
Step18: This resulted in the following best meta parameters
Step19: 6 - Selecting features
Step20: This shows that mainly the first 2 lags of recruit are useful, the others are just noise. (The first one at x==0 reflects the correlation of recruit with itself)
Step21: First we determine a fit, to get the order in which features are removed.
Step22: Now, per step, we determine the cross val score using the features that were still remaining at that step. This gives a good evaluation of those features, of which we can then select the best
Step23: We now define two variables to indicate the selected features
Step25: 7 - Final score on the test set
Step26: Conclusion
Step27: Best model on all features
Step28: Alternative model on all features
Step29: Initial (linear) model on all features
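A minimal pandas sketch of why such features cannot leak future information (this is not the bdranalytics API, just the equivalent operations): a lag-k column for row t only uses the value at t-k, and a trailing window mean only uses rows up to t.
lag3 = X_orig['soi'].shift(3)                    # value observed 3 steps earlier
mean6 = X_orig['soi'].rolling(window=6).mean()   # mean of the 6 most recent values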
|
7,240
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from numpy import *
from scipy.integrate import odeint
from matplotlib.pyplot import *
ion()
def RM(y, t, r, K, a, h, e, d):
return array([ y[0] * ( r*(1-y[0]/K) - a*y[1]/(1+a*h*y[0]) ),
y[1] * (e*a*y[0]/(1+a*h*y[0]) - d) ])
t = arange(0, 1000, .1)
y0 = [1, 1.]
pars = (1., 10., 1., 0.1, 0.1, 0.1)
y = odeint(RM, y0, t, pars)
plot(t, y)
xlabel('time')
ylabel('population')
legend(['resource', 'consumer'])
# plot the solution in the phase space
plot(y[:,0], y[:,1])
# defines a grid of points
R, C = meshgrid(arange(0.95, 1.25, .05), arange(0.95, 1.04, 0.01))
# calculates the value of the derivative at the point in the grid
dy = RM(array([R, C]), 0, *pars)
# plots arrows on the points of the grid, with the difection
# and length determined by the derivative dy
# This is a picture of the flow of the solution in the phase space
quiver(R, C, dy[0,:], dy[1,:], scale_units='xy', angles='xy')
xlabel('Resource')
ylabel('Consumer')
# now K = 15
t = arange(0, 1000, .1)
pars = (1., 15., 1., 0.1, 0.1, 0.1)
y_osc = odeint(RM, y0, t, pars)
plot(t, y_osc)
xlabel('time')
ylabel('population')
legend(['resource', 'consumer'])
plot(y_osc[:,0], y_osc[:,1])
R, C = meshgrid(arange(0, 6., .4), arange(0, 2.1, 0.2))
dy = RM(array([R, C]), 0, *pars)
quiver(R, C, dy[0,:], dy[1,:], scale_units='xy', angles='xy')
xlabel('R')
ylabel('C')
plot(10., y[-500:,0].min(), 'og')
plot(10., y[-500:,0].max(), 'og')
plot(10., y[-500:,1].min(), 'ob')
plot(10., y[-500:,1].max(), 'ob')
plot(15., y_osc[-500:,0].min(), 'og')
plot(15., y_osc[-500:,0].max(), 'og')
plot(15., y_osc[-500:,1].min(), 'ob')
plot(15., y_osc[-500:,1].max(), 'ob')
xlim((0, 20))
yscale('log')
xlabel('K')
ylabel('min / max population')
## this block calculates solutions for many K's, it should take some time
# empty lists to append the values later
ymin = []
ymax = []
KK = arange(.5, 25, .5)
t = arange(0, 6000, 1.)
# loop over the values of K (KK)
for K in KK:
# redefine the parameters using the new K
pars = (1., K, 1., 0.1, 0.1, 0.1)
# integrate again the equation, with new parameters
y = odeint(RM, y0, t, pars)
# calculate the minimum and maximum of the populations, but
# only for the last 1000 steps (the long-term solution),
# appending the result to the list
# question: is 1000 enough? When it wouldn't be?
ymin.append(y[-1000:,:].min(axis=0))
ymax.append(y[-1000:,:].max(axis=0))
# convert the lists into arrays
ymin = array(ymin)
ymax = array(ymax)
# and now, we plot the bifurcation diagram
plot(KK, ymin[:,0], 'g', label='resource')
plot(KK, ymax[:,0], 'g')
plot(KK, ymin[:,1], 'b', label='consumer')
plot(KK, ymax[:,1], 'b')
xlabel('$K$')
ylabel('min/max populations')
legend(loc='best')
# use a log scale in the y-axis
yscale('log')
def RM_season(y, t, r, alpha, T, K, a, h, e, d):
# in this function, `t` appears explicitly
return array([ y[0] * ( r * (1+alpha*sin(2*pi*t/T)) *
(1-y[0]/K) - a*y[1]/(1+a*h*y[0]) ),
y[1] * (e*a*y[0]/(1+a*h*y[0]) - d) ])
t = arange(0, 2000, 1.)
y0 = [1., 1.]
pars = (1., 0.1, 80., 10., 1., 0.1, 0.1, 0.1)
y = odeint(RM_season, y0, t, pars)
plot(t, y)
xlabel('time')
ylabel('population')
legend(['resource', 'consumer'])
ymin = []
ymax = []
t = arange(0, 6000, 1.) # times
TT = arange(1, 80, 2) # periods
for T in TT:
pars = (1., 0.1, T, 10., 1., 0.1, 0.1, 0.1)
y = odeint(RM_season, y0, t, pars)
ymin.append(y[-1000:,:].min(axis=0))
ymax.append(y[-1000:,:].max(axis=0))
ymin = array(ymin)
ymax = array(ymax)
plot(TT, ymin[:,0], 'g', label='resource')
plot(TT, ymax[:,0], 'g')
plot(TT, ymin[:,1], 'b', label='consumer')
plot(TT, ymax[:,1], 'b')
xlabel('$T$')
ylabel('min/max populations')
legend(loc='best')
yscale('log')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For the parameters chosen above, the long-term (asymptotic) solution is a fixed point. Let's see this in the phase space, that is, the plane of the consumer ($C$) vs. the resource ($R$); the model equations are written out after these steps. We note that the arrows are "circulating", but always point inwards, and so the trajectory moves toward the middle, to the fixed point.
Step2: Messing a little with the parameters...
Step3: And, looking again at the phase space plot, we now see that the flow (the arrows) inside the cycle spirals outwards, towards a limit cycle, while the arrows outside point inwards. The limit cycle corresponds to the periodic solution we just saw.
Step4: The bifurcation diagram
Step5: This is a very poor bifurcation diagram
Step6: Well, the first prediction was OK (notice that the plot above uses a log scale), but for high $K$, the minima of the oscillation go to very low values, so that the populations have a high risk of extinction. This phenomenon is the so-called paradox of enrichment.
Step7: Notice that, even with small $K$, the solutions oscillate due to the oscillation of $r(t)$.
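For reference, the equations integrated by the RM function above are the Rosenzweig-MacArthur resource-consumer model $\frac{dR}{dt} = R\left[r\left(1-\frac{R}{K}\right) - \frac{aC}{1+ahR}\right]$ and $\frac{dC}{dt} = C\left[\frac{eaR}{1+ahR} - d\right]$; the seasonal variant RM_season simply replaces $r$ by $r\,[1+\alpha\sin(2\pi t/T)]$.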
|
7,241
|
<ASSISTANT_TASK:>
Python Code:
%reload_ext autoreload
%autoreload 2
import numpy as np
import os
import pandas as pd
import random
import scipy
from scipy.stats import zscore
# interactive
from ipywidgets.widgets import interact, IntSlider, FloatSlider
from IPython.display import display
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from multiDatenanalyse import *
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
mmPfad = r"D:\C\Uni\Master\KSS\MV_Analyse\Messmatrix.csv"#'../data/Messmatrix.csv'
df = load_data(mmPfad)
min_num_walzlos = 300
df_all_prod = [extract_product(df, product_id=product_id, min_num_walzlos=min_num_walzlos) for product_id in range(26)]
@interact(index=IntSlider(min=0, max=25, value = 11))
def count_per_product(index):
print("Anzahl der Walzlose: "+str(len(pd.unique(df_all_prod[index]["Header_Walzlos"]))))
product_id = 11
df_prod = df_all_prod[product_id]
print("Anzahl der Walzlose: "+str(len(pd.unique(df_prod["Header_Walzlos"]))))
df_prod.columns
test_frac = 0.2
train_set, test_set = get_lda_data(df_prod, test_frac=test_frac)
train_set['data'] = zscore(train_set['data'])
test_set['data'] = zscore(test_set['data'])
cov_train = np.cov(train_set['data'].T)
cov_test = np.cov(test_set['data'].T)
plt.figure(figsize=(15,10))
ax1 = plt.subplot(121)
ax1.imshow(255*(cov_train-np.max(cov_train))/(np.max(cov_train)-np.min(cov_train)), 'gray')
ax1.set_title('Kovarianz der Trainingsdaten')
ax1.set_xlabel('Merkmal')
ax1.set_ylabel('Merkmal')
ax2 = plt.subplot(122)
ax2.imshow(255*(cov_test-np.max(cov_test))/(np.max(cov_test)-np.min(cov_test)), 'gray')
ax2.set_title('Kovarianz der Testdaten')
ax2.set_xlabel('Merkmal')
ax2.set_ylabel('Merkmal')
print('Wie selbstähnlich sind die Test- und Trainingsdaten?')
# extract data and label
X_train, y_train = train_set['data'], train_set['label']
X_test, y_test = test_set['data'], test_set['label']
# number components for transform
n_components = 3
# LDA object
sklearn_LDA = LDA(n_components=n_components, solver='eigen')
# fit with train data
sklearn_LDA = sklearn_LDA.fit(X_train, y_train)
plt.stem(sklearn_LDA.explained_variance_ratio_)
plt.xlabel('Index Eigenwert')
plt.ylabel('Beitrag zur Varianz')
plt.title("Varainzverteilung")
train_pred = sklearn_LDA.predict(X_train)
print('{0:.2f}% Genauigkeit bei der Klassifikation der Trainingsdaten'.format(100*np.mean(train_pred == y_train)))
test_pred = sklearn_LDA.predict(X_test)
print('{0:.2f}% Genauigkeit bei der Klassifikation der Testdaten'.format(100*np.mean(test_pred == y_test)))
data = sklearn_LDA.transform(X_train)
plot_lda(data, y_train, 'Transformierte Trainingsdaten')
eigvecs = sklearn_LDA.scalings_
plt.figure(figsize=(20,5))
plt.imshow(np.abs(eigvecs), 'gray')
#_ = plt.axis('off')
plt.title("Eigenvektoren")
print('Einflussreichstes Merkmal im ersten EV: {}'.format(df[df.columns[6:]].columns[np.argmax(np.abs(eigvecs[:, 0]))]))
print('Einflussreichstes Merkmal im zweiten EV: {}'.format(df[df.columns[6:]].columns[np.argmax(np.abs(eigvecs[:, 1]))]))
plt.figure(figsize=(20,10))
for index in range(3):
ax = plt.subplot(1,3,index+1)
ax.stem(eigvecs[:, index])
ax.set_title('Eigenvektor {}'.format(index))
ax.set_xlabel('Merkmalsindex')
ax.set_ylabel('Beitrag in Eigenvektor')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the feature matrix
Step2: Sorting the data product by product
Step3: Implementing product selection with a slider
Step4: Selecting a product and printing the number of rolling lots with "enough" tubes
Step5: Remaining features
Step6: Splitting the data into test and training sets
Step7: Normalising the data
Step8: Covariance matrices of the training and test data
Step9: Running the LDA on the training data
Step10: Plotting the eigenvalues
Step11: Testing the classification
Step12: Plotting the transformed training data and their class membership
Step13: Interpreting the LDA results (see the note after these steps)
Step14: Plotting the eigenvectors
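A short note on interpretation (phrased generally, not specific to this dataset): each column of scalings_ is an eigenvector whose entries weight the z-scored input features in the discriminant projection, so the feature with the largest absolute entry, as extracted with np.argmax(np.abs(eigvecs[:, i])) in the code above, is the one contributing most to that discriminant direction.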
|
7,242
|
<ASSISTANT_TASK:>
Python Code:
def swap(A, i, j):
A[i], A[j] = A[j], A[i]
def sink(A, k, n):
while 2 * k + 1 <= n:
j = 2 * k + 1
if j + 1 <= n and A[j] > A[j + 1]:
j += 1
if A[k] < A[j]:
return
swap(A, k, j)
k = j
def heap_sort(A):
n = len(A) - 1
for k in range((n + 1) // 2 - 1, -1, -1):
sink(A, k, n)
while n >= 1:
swap(A, 0, n)
n -= 1
sink(A, 0, n)
def heap_sort(A):
n = len(A) - 1
for k in range((n + 1) // 2 - 1, -1, -1):
sink(A, k, n)
while n >= 1:
swap(A, 0, n)
n -= 1
sink(A, 0, n)
import random as rnd
def isOrdered(L):
for i in range(len(L) - 1):
assert L[i] >= L[i+1]
from collections import Counter
def sameElements(L, S):
assert Counter(L) == Counter(S)
def testSort(n, k):
for i in range(n):
L = [ rnd.randrange(2*k) for x in range(k) ]
oldL = L[:]
heap_sort(L)
isOrdered(L)
sameElements(L, oldL)
assert len(L) == len(oldL)
print('.', end='')
print()
print("All tests successful!")
%%time
testSort(100, 20_000)
%%time
k = 1000_000
L = [ rnd.randrange(2 * k) for x in range(k) ]
S = heap_sort(L)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The procedure sink takes three arguments.
Step2: The function call heap_sort(A) sorts the array A in place and proceeds in two phases (a short usage sketch follows these steps).
Step3: The version of heap_sort given below adds some animation.
Step4: Testing
Step5: The function $\texttt{testSort}(n, k)$ generates $n$ random lists of length $k$, sorts them, and checks whether the output is sorted and contains the same elements as the input.
Step6: Next, we sort a million random integers. It is not as fast as merge sort, but we do not need an auxiliary array and hence we don't need additional storage.
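A tiny usage sketch of the functions above (note that, with this sink, the array ends up in descending order, which is what the isOrdered check expects):
A = [5, 1, 4, 2, 3]
heap_sort(A)        # sorts in place
print(A)            # expected: [5, 4, 3, 2, 1]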
|
7,243
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from matplotlib import pyplot as plt
print(__doc__)
# Setup for reading the raw data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Read the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
events = mne.read_events(event_fname)
# The EEG channels will be plotted to visualize the difference in referencing
# schemes.
picks = mne.pick_types(raw.info, meg=False, eeg=True, eog=True, exclude='bads')
reject = dict(eeg=180e-6, eog=150e-6)
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks, reject=reject)
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, sharex=True)
# No reference. This assumes that the EEG has already been referenced properly.
# This explicitly prevents MNE from adding a default EEG reference.
raw.set_eeg_reference([])
evoked_no_ref = mne.Epochs(raw, **epochs_params).average()
evoked_no_ref.plot(axes=ax1, titles=dict(eeg='EEG Original reference'))
# Average reference. This is normally added by default, but can also be added
# explicitly.
raw.set_eeg_reference()
evoked_car = mne.Epochs(raw, **epochs_params).average()
evoked_car.plot(axes=ax2, titles=dict(eeg='EEG Average reference'))
# Re-reference from an average reference to the mean of channels EEG 001 and
# EEG 002.
raw.set_eeg_reference(['EEG 001', 'EEG 002'])
evoked_custom = mne.Epochs(raw, **epochs_params).average()
evoked_custom.plot(axes=ax3, titles=dict(eeg='EEG Custom reference'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Apply different EEG referencing schemes and plot the resulting evokeds.
|
7,244
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
from sympy import init_printing
from sympy import S
from sympy import sin, cos, tanh, exp, pi, sqrt, log
from boutdata.mms import x, y, z, t
from boutdata.mms import DDX
import os, sys
# If we add to sys.path, then it must be an absolute path
common_dir = os.path.abspath('./../../../../common')
# Sys path is a list of system paths
sys.path.append(common_dir)
from CELMAPy.MES import make_plot, BOUT_print
init_printing()
folder = '../gaussianWSinAndParabola/'
# Initialization
the_vars = {}
# We need Lx
from boututils.options import BOUTOptions
myOpts = BOUTOptions(folder)
Lx = eval(myOpts.geom['Lx'])
Ly = eval(myOpts.geom['Ly'])
# Gaussian with sinus and parabola
# The skew sinus
# In cartesian coordinates we would like a sinus with with a wave-vector in the direction
# 45 degrees with respect to the first quadrant. This can be achieved with a wave vector
# k = [1/sqrt(2), 1/sqrt(2)]
# sin((1/sqrt(2))*(x + y))
# We would like 2 nodes, so we may write
# sin((1/sqrt(2))*(x + y)*(2*pi/(2*Lx)))
# The gaussian
# In cartesian coordinates we would like
# f = exp(-(1/(2*w^2))*((x-x0)^2 + (y-y0)^2))
# A parabola
# In cartesian coordinates, we have
# ((x-x0)/Lx)^2
wx = 0.5*Lx
wy = 0.5*Ly
x0 = 0.3*Lx
y0 = 0.5*Ly
the_vars['f'] = sin((1/sqrt(2))*(x + y)*(2*pi/(2*Lx)))*\
exp(-(((x-x0)**2/(2*wx**2)) + ((y-y0)**2/(2*wy**2))))*\
((x-x0)/Lx)**2
the_vars['a'] = the_vars['f'].subs(y, 0)
the_vars['b'] = the_vars['f'].diff(y).subs(y, 0)
make_plot(folder=folder, the_vars=the_vars, plot2d=True, include_aux=False, direction='y')
BOUT_print(the_vars, rational=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialize
Step2: Define the variables
Step3: Plot
Step4: Print the variables in BOUT++ format
|
7,245
|
<ASSISTANT_TASK:>
Python Code:
import bifacial_radiance
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent / 'bifacial_radiance' / 'TEMP'/ 'Tutorial_08')
if not os.path.exists(testfolder):
os.makedirs(testfolder)
simulationName = 'tutorial_8'
moduletype = "test-module"
albedo = 0.25
lat = 37.5
lon = -77.6
# Scene variables
nMods = 20
nRows = 7
hub_height = 1.5 # meters
gcr = 0.33
# Traking parameters
cumulativesky = False
limit_angle = 60
angledelta = 0.01
backtrack = True
#makeModule parameters
x = 1
y = 2
xgap = 0.01
zgap = 0.05
ygap = 0.0 # numpanels=1 anyways so it doesnt matter anyway
numpanels = 1
axisofrotationTorqueTube = True
diameter = 0.1
tubetype = 'Oct'
material = 'black'
tubeParams = {'diameter':diameter,
'tubetype':tubetype,
'material':material,
'axisofrotation':axisofrotationTorqueTube,
'visible':True}
# Analysis parmaeters
startdate = '11_06_08' # Options: mm_dd, mm_dd_HH, mm_dd_HHMM, YYYY-mm-dd_HHMM
enddate = '11_06_10'
sensorsy = 12
demo = bifacial_radiance.RadianceObj(simulationName, path=testfolder)
demo.setGround(albedo)
epwfile = demo.getEPW(lat,lon)
metdata = demo.readWeatherFile(epwfile, starttime=startdate, endtime=enddate)
mymodule = demo.makeModule(name=moduletype, x=x, y=y, xgap=xgap,
ygap = ygap, zgap=zgap, numpanels=numpanels, tubeParams=tubeParams)
pitch = mymodule.sceney/gcr
sceneDict = {'pitch':pitch,'hub_height':hub_height, 'nMods': nMods, 'nRows': nRows}
demo.set1axis(limit_angle = limit_angle, backtrack = backtrack, gcr = gcr, cumulativesky = cumulativesky)
demo.gendaylit1axis()
demo.makeScene1axis(module=mymodule, sceneDict=sceneDict)
demo.makeOct1axis()
demo.analysis1axis(sensorsy = sensorsy);
resultfolder = os.path.join(testfolder, 'results')
writefiletitle = "Mismatch_Results.csv"
portraitorlandscape='portrait' # Options are 'portrait' or 'landscape'
bififactor=0.9 # Bifaciality factor DOES matter now, as the rear irradiance values will be multiplied by this factor.
numcells= 72# Options are 72 or 96 at the moment.
downsamplingmethod = 'byCenter' # Options are 'byCenter' or 'byAverage'.
bifacial_radiance.mismatch.analysisIrradianceandPowerMismatch(testfolder=resultfolder, writefiletitle=writefiletitle, portraitorlandscape=portraitorlandscape,
bififactor=bififactor, numcells=numcells)
print ("Your hourly mismatch values are now saved in the file above! :D")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='step2'></a>
|
7,246
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.feature_selection import VarianceThreshold
X = [[0, 2, 0, 3],
[0, 1, 4, 3],
[0, 1, 1, 3]]
selector = VarianceThreshold()
selector.fit_transform(X)
import pandas as pd
import seaborn as sns
%matplotlib inline
from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True)
sns.heatmap(pd.DataFrame(X).corr(), cmap=sns.diverging_palette(220, 10, as_cmap=True));
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
X, y = load_iris(return_X_y=True)
print('Before feature selection:', X.shape)
selector = SelectKBest(f_classif, k=2).fit(X, y)
X_new = selector.transform(X)
print('After feature selection:', X_new.shape)
print('Scores:', selector.scores_)
import sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import gzip, pickle, sys
f = gzip.open('Datasets/mnist.pkl.gz', 'rb')
(input_train, output_train), (input_test, output_test), _ = pickle.load(f, encoding='bytes')
for i in range(4):
plt.subplot(2,2,i+1)
plt.imshow(input_train[i].reshape((28,28)), cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
from sklearn.ensemble import RandomForestClassifier
randomforest = RandomForestClassifier(n_estimators=30)
randomforest.fit(input_train,output_train)
from sklearn.metrics import classification_report
print(classification_report(output_test, randomforest.predict(input_test)))
from sklearn.decomposition import PCA
pca = PCA(n_components=500)
pca.fit(input_train)
plt.figure(figsize=(12,6))
plt.plot(np.cumsum(pca.explained_variance_ratio_[0:500]),marker = 'o')
plt.show()
np.cumsum(pca.explained_variance_ratio_[0:500])[200]
pca = PCA(n_components= 200)
pca.fit(input_train)
x_train = pca.transform(input_train)
x_test = pca.transform(input_test)
for i in range(4):
plt.subplot(2,2,i+1)
plt.imshow(pca.inverse_transform(x_train)[i].reshape((28, 28)), cmap=plt.cm.gray_r, interpolation='nearest' )
plt.show()
randomforest = RandomForestClassifier(n_estimators=30)
randomforest.fit(x_train,output_train)
from sklearn.metrics import classification_report
print(classification_report(output_test, randomforest.predict(x_test)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question
|
7,247
|
<ASSISTANT_TASK:>
Python Code:
%%bash
git clone https://github.com/amueller/introduction_to_ml_with_python.git
from scipy.misc import imread
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now go back to your Jupyter Hub file list, to access the code examples.
|
7,248
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from gprMax.waveforms import Waveform
from tools.plot_source_wave import check_timewindow, mpl_plot
w = Waveform()
w.type = 'ricker'
w.amp = 1
w.freq = 25e6
timewindow = 300e-9
dt = 8.019e-11
timewindow, iterations = check_timewindow(timewindow, dt)
plt = mpl_plot(w, timewindow, dt, iterations, fft=True)
%matplotlib inline
import numpy as np
from gprMax.waveforms import Waveform
from tools.plot_source_wave import check_timewindow, mpl_plot
waveformvalues = np.loadtxt('/Users/cwarren/Desktop/sajad/SW_corrected.txt', skiprows=1, dtype=np.float32)
w = Waveform()
w.type = 'user'
w.amp = 1
w.freq = 0
w.uservalues = waveformvalues[:]
timewindow = 2e-9
dt = 4.71731e-12
timewindow, iterations = check_timewindow(timewindow, dt)
w.uservalues = np.zeros(iterations, dtype=np.float32)
w.uservalues[0:len(waveformvalues)] = waveformvalues[:]
plt = mpl_plot(w, timewindow, dt, iterations, fft=True)
from math import sqrt
# Speed of light in vacuum (m/s)
c = 299792458
# Highest relative permittivity present in model
er = 81
# Maximum frequency present in model
fmax = 80e6
# Minimum wavelength
wmin = c / (fmax * sqrt(er))
# Maximum spatial resolution (allowing 10 cells per wavelength)
dmin = wmin / 10
# Time steps at CFL limits for cubic cells
dt3D = dmin / (sqrt(3) * c)
dt2D = dmin / (sqrt(2) * c)
print('Minimum wavelength: {:g} m'.format(wmin))
print('Maximum spatial resolution: {:g} m'.format(dmin))
print('Time step for 3D cubic cell: {:g} s'.format(dt3D))
print('Time step for 2D square cell: {:g} s'.format(dt2D))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting a user-defined waveform
Step2: Determining a spatial resolution
|
7,249
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
from nst_utils import *
import numpy as np
import tensorflow as tf
%matplotlib inline
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
print(model)
content_image = scipy.misc.imread("images/louvre.jpg")
imshow(content_image)
# GRADED FUNCTION: compute_content_cost
def compute_content_cost(a_C, a_G):
    """
    Computes the content cost

    Arguments:
    a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G

    Returns:
    J_content -- scalar that you compute using equation 1 above.
    """
### START CODE HERE ###
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape a_C and a_G (≈2 lines)
a_C_unrolled = tf.transpose(tf.reshape(a_C, [-1, n_H * n_W , n_C]))
a_G_unrolled = tf.transpose(tf.reshape(a_G, [-1, n_H * n_W , n_C]))
# compute the cost with tensorflow (≈1 line)
J_content = tf.reduce_sum(tf.square(a_C_unrolled - a_G_unrolled))/(4 * m * n_H * n_W * n_C)
### END CODE HERE ###
return J_content
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_content = compute_content_cost(a_C, a_G)
print("J_content = " + str(J_content.eval()))
style_image = scipy.misc.imread("images/monet_800600.jpg")
imshow(style_image)
# GRADED FUNCTION: gram_matrix
def gram_matrix(A):
    """
    Argument:
    A -- matrix of shape (n_C, n_H*n_W)

    Returns:
    GA -- Gram matrix of A, of shape (n_C, n_C)
    """
### START CODE HERE ### (≈1 line)
GA = tf.matmul(A, tf.transpose(A))
### END CODE HERE ###
return GA
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
A = tf.random_normal([3, 2*1], mean=1, stddev=4)
GA = gram_matrix(A)
print("GA = " + str(GA.eval()))
# GRADED FUNCTION: compute_layer_style_cost
def compute_layer_style_cost(a_S, a_G):
    """
    Arguments:
    a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G

    Returns:
    J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)
    """
### START CODE HERE ###
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape the images to have them of shape (n_C, n_H*n_W) (≈2 lines)
a_S = tf.transpose(tf.reshape(a_S, [n_H * n_W, n_C]))
a_G = tf.transpose(tf.reshape(a_G, [n_H * n_W, n_C]))
# Computing gram_matrices for both images S and G (≈2 lines)
GS = gram_matrix(a_S)
GG = gram_matrix(a_G)
# Computing the loss (≈1 line)
J_style_layer = tf.reduce_sum(tf.square(tf.subtract(GS, GG)))/(4 * (n_C * n_H * n_W)**2)
### END CODE HERE ###
return J_style_layer
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_style_layer = compute_layer_style_cost(a_S, a_G)
print("J_style_layer = " + str(J_style_layer.eval()))
STYLE_LAYERS = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2)]
def compute_style_cost(model, STYLE_LAYERS):
    """
    Computes the overall style cost from several chosen layers

    Arguments:
    model -- our tensorflow model
    STYLE_LAYERS -- A python list containing:
                        - the names of the layers we would like to extract style from
                        - a coefficient for each of them

    Returns:
    J_style -- tensor representing a scalar value, style cost defined above by equation (2)
    """
# initialize the overall style cost
J_style = 0
for layer_name, coeff in STYLE_LAYERS:
# Select the output tensor of the currently selected layer
out = model[layer_name]
# Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out
a_S = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name]
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute style_cost for the current layer
J_style_layer = compute_layer_style_cost(a_S, a_G)
# Add coeff * J_style_layer of this layer to overall style cost
J_style += coeff * J_style_layer
return J_style
# GRADED FUNCTION: total_cost
def total_cost(J_content, J_style, alpha = 10, beta = 40):
    """
    Computes the total cost function

    Arguments:
    J_content -- content cost coded above
    J_style -- style cost coded above
    alpha -- hyperparameter weighting the importance of the content cost
    beta -- hyperparameter weighting the importance of the style cost

    Returns:
    J -- total cost as defined by the formula above.
    """
### START CODE HERE ### (≈1 line)
J = alpha * J_content + beta * J_style
### END CODE HERE ###
return J
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(3)
J_content = np.random.randn()
J_style = np.random.randn()
J = total_cost(J_content, J_style)
print("J = " + str(J))
# Reset the graph
tf.reset_default_graph()
# Start interactive session
sess = tf.InteractiveSession()
content_image = scipy.misc.imread("images/louvre_small.jpg")
content_image = reshape_and_normalize_image(content_image)
style_image = scipy.misc.imread("images/monet.jpg")
style_image = reshape_and_normalize_image(style_image)
generated_image = generate_noise_image(content_image)
imshow(generated_image[0])
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
# Assign the content image to be the input of the VGG model.
sess.run(model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(model, STYLE_LAYERS)
### START CODE HERE ### (1 line)
J = total_cost(J_content, J_style, alpha = 10, beta = 40)
### END CODE HERE ###
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step (1 line)
train_step = optimizer.minimize(J)
def model_nn(sess, input_image, num_iterations = 200):
# Initialize global variables (you need to run the session on the initializer)
### START CODE HERE ### (1 line)
sess.run(tf.global_variables_initializer())
### END CODE HERE ###
# Run the noisy input image (initial generated image) through the model. Use assign().
### START CODE HERE ### (1 line)
sess.run(model["input"].assign(input_image))
### END CODE HERE ###
for i in range(num_iterations):
# Run the session on the train_step to minimize the total cost
### START CODE HERE ### (1 line)
_ = sess.run(train_step)
### END CODE HERE ###
# Compute the generated image by running the session on the current model['input']
### START CODE HERE ### (1 line)
generated_image = sess.run(model["input"])
### END CODE HERE ###
# Print every 20 iteration.
if i%20 == 0:
Jt, Jc, Js = sess.run([J, J_content, J_style])
print("Iteration " + str(i) + " :")
print("total cost = " + str(Jt))
print("content cost = " + str(Jc))
print("style cost = " + str(Js))
# save current generated image in the "/output" directory
save_image("output/" + str(i) + ".png", generated_image)
# save last generated image
save_image('output/generated_image.jpg', generated_image)
return generated_image
model_nn(sess, generated_image)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 - Problem Statement
Step2: The model is stored in a python dictionary where each variable name is the key and the corresponding value is a tensor containing that variable's value. To run an image through this network, you just have to feed the image to the model. In TensorFlow, you can do so using the tf.assign function. In particular, you will use the assign function like this
Step4: The content image (C) shows the Louvre museum's pyramid surrounded by old Paris buildings, against a sunny sky with a few clouds.
Step5: Expected Output
Step7: This painting was painted in the style of impressionism.
Step9: Expected Output
Step10: Expected Output
Step12: You can combine the style costs for different layers as follows (the cost formulas used here are summarised after these steps)
Step14: Note
Step15: Expected Output
Step16: Let's load, reshape, and normalize our "content" image (the Louvre museum picture)
Step17: Let's load, reshape and normalize our "style" image (Claude Monet's painting)
Step18: Now, we initialize the "generated" image as a noisy image created from the content_image. By initializing the pixels of the generated image to be mostly noise but still slightly correlated with the content image, this will help the content of the "generated" image more rapidly match the content of the "content" image. (Feel free to look in nst_utils.py to see the details of generate_noise_image(...); to do so, click "File-->Open..." at the upper-left corner of this Jupyter notebook.)
Step19: Next, as explained in part (2), let's load the VGG-19 model.
Step20: To get the program to compute the content cost, we will now assign a_C and a_G to be the appropriate hidden layer activations. We will use layer conv4_2 to compute the content cost. The code below does the following
Step21: Note
Step22: Exercise
Step23: You'd previously learned how to set up the Adam optimizer in TensorFlow. Let's do that here, using a learning rate of 2.0. See reference
Step24: Exercise
Step25: Run the following cell to generate an artistic image. It should take about 3min on CPU for every 20 iterations but you start observing attractive results after ≈140 iterations. Neural Style Transfer is generally trained using GPUs.
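For reference, the cost functions implemented above are $J_{content} = \frac{1}{4\, n_H n_W n_C}\sum (a^{(C)} - a^{(G)})^2$, the per-layer style cost $J_{style}^{[l]} = \frac{1}{4\, n_C^2 (n_H n_W)^2}\sum_{i,j} (G^{(S)}_{ij} - G^{(G)}_{ij})^2$ with $G = AA^{\top}$, the overall style cost $J_{style} = \sum_l \lambda_l J_{style}^{[l]}$ (here $\lambda_l = 0.2$ for the five chosen layers), and the total cost $J = \alpha J_{content} + \beta J_{style}$ with $\alpha=10$, $\beta=40$.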
|
7,250
|
<ASSISTANT_TASK:>
Python Code:
%%HTML
<img src="https://imgs.xkcd.com/comics/bun_alert.png" width=500></img>
%%HTML
<blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">Pay no mind.... <a href="https://t.co/mnIPHJXE1h">pic.twitter.com/mnIPHJXE1h</a></p>— David Beazley (@dabeaz) <a href="https://twitter.com/dabeaz/status/890634046958477312">July 27, 2017</a></blockquote>
<script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>
# let's reproduce it
class A():
pass
A.__dict__ is A.__dict__
# ... and more robustly...
a = A()
a.__class__.__dict__ is a.__class__.__dict__
class B():
pass
C = type('C',(),dict())
D = type('C',(),dict())
D
# Start with the equivalence operator (==)
# --> remember that this will be defined by the ".__eq__()" method of the argument on the left
B == B
B == C
C == D
B == D
# check the directory of the object's attributes (more about this later)
vars(B)
vars(B) == vars(B)
vars(B) == vars(C)
vars(C) == vars(D)
# let's cast it to a real 'dict'
dict(vars(D))
dict(vars(B)) == dict(vars(C))
dict(vars(C)) == dict(vars(D))
# check the directory of attributes (more about this later)
dir(B)
dir(B) == dir(B)
dir(B) == dir(C)
dir(C) == dir(D)
# instance and type
isinstance(B,type)
isinstance(B,object)
type(B)
B.__class__
B.__base__
B.__bases__
id(B)
# the 'is' operator compares the result of the 'id' function's application to the arguments
B is B
id(B) == id(B)
# now use B's callability to create an instance of it
b = B()
isinstance(b,B)
type(b).__bases__
# FWIW
type(type)
type.__bases__
# set some attributes of some objects
setattr(b,'an_instance_attr',1)
setattr(B,'a_class_attr',2)
setattr(B,'a_class_method',lambda x: 3)
vars(b)
b.__dict__
vars(B)
vars(type)
vars(object)
# collect all the instance attributes of the inheritance tree (don't include type)
attribute_keys = set( list(vars(b).keys()) + list(vars(B).keys()) + list(vars(object).keys()))
for attribute_key in attribute_keys:
print('{} : {}'.format(attribute_key,getattr(b,attribute_key)))
# our manual attributes collection should match that from 'dir'
attribute_keys - set(dir(b))
b.an_instance_attr
B.an_instance_attr
B.a_class_attr
b.a_class_attr
b.a_class_method
b.a_class_method()
B.mro()
# Python's MRO invokes a smart algorithm that accounts for circularity in the inheritance tree
# https://en.wikipedia.org/wiki/C3_linearization
class X():
a = 1
class Y():
b = 2
class Z(X,Y):
c = 3
Z.mro()
Z.c
Z.b
Z.a
# get an attribute defined only by the base class
Z.__repr__
# let's start with the instance-level attribute dictionary
b.__dict__['an_attr'] = 'value'
b.__dict__
# I don't know why anyone would want to do this, but we'll allow it at the level of instance objects.
# Any hashable object can be a key in an ordinary dictionary.
b.__dict__[1] = [3,4]
# what happens if we do the same to `b`'s class?
b.__class__.__dict__[1] = [3,4]
# right, we've seen this "mappingproxy" before
b.__class__.__dict__
# also equivalent
B.__dict__
# turns out, it's a method of 'object'
B.__setattr__
setattr(B,1,2)
%%HTML
<blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">Pay no mind.... <a href="https://t.co/mnIPHJXE1h">pic.twitter.com/mnIPHJXE1h</a></p>— David Beazley (@dabeaz) <a href="https://twitter.com/dabeaz/status/890634046958477312">July 27, 2017</a></blockquote>
<script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>
# the example
A.__dict__ is A.__dict__
# run this a few times
id(A.__dict__)
# what about this?
id(A.__dict__) == id(A.__dict__)
# or this?
x = id(A.__dict__)
y = id(A.__dict__)
x == y
# or this?
x = A.__dict__
y = A.__dict__
id(x) == id(y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Problem
Step2: Our path...
Step3: Takeaways
Step4: Takeaways
Step5: Takeaways
Step6: Conclusion
Step7: NOTE
Step8: Take-aways
Step9: To locate the attribute named my_attr, Python looks in the instance's own __dict__ and then walks the class and its bases in MRO order (data descriptors on the class take precedence); a small illustration follows these steps.
Step10: The MappingProxyType type is a read-only view of a mapping (dictionary), so we can't set attributes on the class by writing into this mapping directly. Class attributes have to be set with setattr (or normal attribute assignment), which goes through __setattr__.
Step11: Take-away
Step12: Takeaway
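A small illustration of the lookup order described in Step9 (standard CPython behaviour for plain, non-descriptor attributes):
class Base:
    attr = 'from Base'
class Child(Base):
    pass
c = Child()
print(c.attr)                        # 'from Base'      -- found by walking the MRO
c.__dict__['attr'] = 'from instance'
print(c.attr)                        # 'from instance'  -- the instance __dict__ is checked first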
|
7,251
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# importing Qiskit
from qiskit import Aer, IBMQ
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import available_backends, execute, register, get_backend, compile
from qiskit.tools import visualization
from qiskit.tools.visualization import circuit_drawer
q = QuantumRegister(6)
qc = QuantumCircuit(q)
qc.x(q[2])
qc.cx(q[1], q[5])
qc.cx(q[2], q[5])
qc.cx(q[3], q[5])
qc.ccx(q[1], q[2], q[4])
qc.ccx(q[3], q[4], q[5])
qc.ccx(q[1], q[2], q[4])
qc.x(q[2])
circuit_drawer(qc)
def black_box_u_f(circuit, f_in, f_out, aux, n, exactly_1_3_sat_formula):
    '''
    Circuit that computes the black-box function from f_in to f_out.
    Create a circuit that verifies whether a given exactly-1 3-SAT
    formula is satisfied by the input. The exactly-1 version
    requires exactly one literal out of every clause to be satisfied.
    '''
num_clauses = len(exactly_1_3_sat_formula)
for (k, clause) in enumerate(exactly_1_3_sat_formula):
# This loop ensures aux[k] is 1 if an odd number of literals
# are true
for literal in clause:
if literal > 0:
circuit.cx(f_in[literal-1], aux[k])
else:
circuit.x(f_in[-literal-1])
circuit.cx(f_in[-literal-1], aux[k])
# Flip aux[k] if all literals are true, using auxiliary qubit
# (ancilla) aux[num_clauses]
circuit.ccx(f_in[0], f_in[1], aux[num_clauses])
circuit.ccx(f_in[2], aux[num_clauses], aux[k])
# Flip back to reverse state of negative literals and ancilla
circuit.ccx(f_in[0], f_in[1], aux[num_clauses])
for literal in clause:
if literal < 0:
circuit.x(f_in[-literal-1])
# The formula is satisfied if and only if all auxiliary qubits
# except aux[num_clauses] are 1
if (num_clauses == 1):
circuit.cx(aux[0], f_out[0])
elif (num_clauses == 2):
circuit.ccx(aux[0], aux[1], f_out[0])
elif (num_clauses == 3):
circuit.ccx(aux[0], aux[1], aux[num_clauses])
circuit.ccx(aux[2], aux[num_clauses], f_out[0])
circuit.ccx(aux[0], aux[1], aux[num_clauses])
else:
raise ValueError('We only allow at most 3 clauses')
# Flip back any auxiliary qubits to make sure state is consistent
# for future executions of this routine; same loop as above.
for (k, clause) in enumerate(exactly_1_3_sat_formula):
for literal in clause:
if literal > 0:
circuit.cx(f_in[literal-1], aux[k])
else:
circuit.x(f_in[-literal-1])
circuit.cx(f_in[-literal-1], aux[k])
circuit.ccx(f_in[0], f_in[1], aux[num_clauses])
circuit.ccx(f_in[2], aux[num_clauses], aux[k])
circuit.ccx(f_in[0], f_in[1], aux[num_clauses])
for literal in clause:
if literal < 0:
circuit.x(f_in[-literal-1])
# -- end function
def n_controlled_Z(circuit, controls, target):
    '''Implement a Z gate with multiple controls'''
if (len(controls) > 2):
raise ValueError('The controlled Z with more than 2 ' +
'controls is not implemented')
elif (len(controls) == 1):
circuit.h(target)
circuit.cx(controls[0], target)
circuit.h(target)
elif (len(controls) == 2):
circuit.h(target)
circuit.ccx(controls[0], controls[1], target)
circuit.h(target)
# -- end function
def inversion_about_average(circuit, f_in, n):
    '''Apply inversion about the average step of Grover's algorithm.'''
# Hadamards everywhere
for j in range(n):
circuit.h(f_in[j])
# D matrix: flips the sign of the state |000> only
for j in range(n):
circuit.x(f_in[j])
n_controlled_Z(circuit, [f_in[j] for j in range(n-1)], f_in[n-1])
for j in range(n):
circuit.x(f_in[j])
# Hadamards everywhere again
for j in range(n):
circuit.h(f_in[j])
# -- end function
qr = QuantumRegister(3)
qInvAvg = QuantumCircuit(qr)
inversion_about_average(qInvAvg, qr, 3)
circuit_drawer(qInvAvg)
'''
Grover search implemented in Qiskit.
This module contains the code necessary to run Grover search on 3
qubits, both with a simulator and with a real quantum computing
device. This code is the companion for the paper
"An introduction to quantum computing, without the physics",
Giacomo Nannicini, https://arxiv.org/abs/1708.03684.
'''
def input_state(circuit, f_in, f_out, n):
    '''(n+1)-qubit input state for Grover search.'''
for j in range(n):
circuit.h(f_in[j])
circuit.x(f_out)
circuit.h(f_out)
# -- end function
# Make a quantum program for the n-bit Grover search.
n = 3
# Exactly-1 3-SAT formula to be satisfied, in conjunctive
# normal form. We represent literals with integers, positive or
# negative, to indicate a Boolean variable or its negation.
exactly_1_3_sat_formula = [[1, 2, -3], [-1, -2, -3], [-1, 2, 3]]
# Define three quantum registers: 'f_in' is the search space (input
# to the function f), 'f_out' is bit used for the output of function
# f, aux are the auxiliary bits used by f to perform its
# computation.
f_in = QuantumRegister(n)
f_out = QuantumRegister(1)
aux = QuantumRegister(len(exactly_1_3_sat_formula) + 1)
# Define classical register for algorithm result
ans = ClassicalRegister(n)
# Define quantum circuit with above registers
grover = QuantumCircuit()
grover.add(f_in)
grover.add(f_out)
grover.add(aux)
grover.add(ans)
input_state(grover, f_in, f_out, n)
T = 2
for t in range(T):
# Apply T full iterations
black_box_u_f(grover, f_in, f_out, aux, n, exactly_1_3_sat_formula)
inversion_about_average(grover, f_in, n)
# Measure the output register in the computational basis
for j in range(n):
grover.measure(f_in[j], ans[j])
# Execute circuit
backend = Aer.get_backend('qasm_simulator')
job = execute([grover], backend=backend, shots=1000)
result = job.result()
# Get counts and plot histogram
counts = result.get_counts(grover)
visualization.plot_histogram(counts)
IBMQ.load_accounts()
# get ibmq_16_melbourne configuration and coupling map
backend = IBMQ.get_backend('ibmq_16_melbourne')
backend_config = backend.configuration()
backend_coupling = backend_config['coupling_map']
# compile the circuit for ibmq_16_melbourne
grover_compiled = compile(grover, backend=backend, coupling_map=backend_coupling, seed=1)
grover_compiled_qasm = grover_compiled.experiments[0].header.compiled_circuit_qasm
print("Number of gates for", backend.name(), "is", len(grover_compiled_qasm.split("\n")) - 4)
circuit_drawer(grover)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: In the sub-circuit above, the three ccx gates on the right are used to compute $( q_1 \wedge \neg q_2 \wedge q_3)$ and write the result to $q_5$, while the three cx gates on the left are used to compute $q_1 \oplus \neg q_2 \oplus q_3$ and write the result to $q_5$. Notice that the right-most ccx gate is used to reset the value of $q_4$ so that it can be reused in the succeeding sub-circuits.
Step4: Inversion about the average
Step6: Finally, the inversion-about-the-average circuit can be realized by the following function
Step7: Here is a circuit of the inversion about the average on three qubits.
Step10: Grover Search
Step11: Running the circuit in real devices
Step12: The number of gates is on the order of thousands, which exceeds what current near-term quantum computers can execute within their decoherence time. It is a challenge to design a quantum circuit for Grover search that solves large optimization problems.
|
7,252
|
<ASSISTANT_TASK:>
Python Code:
string_number = "1066"
integer = int(string_number)
print(int(integer))
print(type(integer)) # <-- the type function returns the type of the object passed in eg 1 is an integer, "hi" is a string, etc.
int("1111", base = 2)
int(6.99999999999999)
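# Added example: int() truncates toward zero rather than always rounding down,
# so a negative float moves up toward zero (this returns -6, not -7)
int(-6.99999999999999)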
a = 10
b = 5
print(a + b) # addition
print(a - b) # subtraction
print(a * b) # multiplication
print(a ** b) # exponentation
print(a % b) # modular arithmetic
print(a / b) # division (note, returns a float! Also, Python2 and Python3 act differently here, so beware!)
print(a // b) # integer division
# For negative numbers, just add a "-" symbol before the number, for example:
print(-a, a)
# Grabbing the last digit:
print(123423420 % 10)
print(1234234231 % 10)
print(12342342302 % 10)
print(123423423023 % 10)
print(1234234230244 % 10)
print(12342342302445 % 10)
print("\n")
# Grabbing the last 3 digits:
print(12342342302445 % 1000)
print(1234234230244 % 1000)
print(123423423023 % 1000) # <-- note 0 is dropped, integers cannot start with a 0. Thus 023 is simply 23
print("\n")
# grabbing the last 6 digits
print(12342342302 % 1000000)
print(123423423 % 1000000)
print(1234567 % 1000000)
print(12345 % 1000000)
def time(time):
    '''time measured in total elapsed minutes, function converts to days, hours, mins'''
t = time // 60
days = t // 24 # 24 hours in a day
hours = t % 24 # after working out number of days, get leftover hours
mins = time % 60
return "Input in mins = '{}' which is: {} days {} hours and {} mins".format(time, days, hours, str(mins).zfill(2))
print(time(1))
print(time(60))
print(time(61))
print(time(960))
print(time(1440))
print(time(1447))
print(time(33456456))
string_number = "10.66"
print(float(string_number))
print(float(-3.2e20)) # < -- Python supports scientific notation...
print(float('inf')) # < -- Python also has a way of representing infinity
print(float(0), float(10), float(5), sep="\n") # notice that when 'float' gets an integer as input '.0' is added to the number
30 / 0
30 % 0
30 // 0
## The only difference a-through-f is where we put a pair of parentheses.
a = 30 - 6 ** 4 * 30 + 20 / 2
b = (30 - 6) ** 4 * 30 + 20 / 2
c = 30 - 6 ** (4 * 30) + 20 / 2
d = 30 - 6 ** 4 * (30 + 20) / 2
e = 30 - 6 ** (4 * 30 + 20 / 2)
f = 30 - (6 ** 4 * 30 + 20 / 2)
print(a, b, c, d, e, f, sep= "\n")
from math import log2, sqrt
print(sqrt(100))
## Alternatively, you can also get the square root of a number by raising it to the 0.5 power
print(100**0.5)
print(log2(100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The int() function also takes an optional argument base as well. So for example “1111”, base=2 will treat “1111” as a binary number and will return whatever that number is in base 10.
Step2: If we give the int function a float (more on them later) it will return the integer part of the number. Take care to note that it truncates toward zero, so negative floats are not rounded down (int(-6.9) is -6).
Step3: Integer Methods
Step4: Modular Arithmetic
Step6: Integer Division
Step7: Floats
Step8: Dividing by Zero
Step9: Order of Operations
Step10: So that's some basic mathematics for you. Python does have support for fractions, complex numbers and a bunch of other stuff as well. But if you want that functionality you have to go hunting for it.
|
7,253
|
<ASSISTANT_TASK:>
Python Code:
import femagtools.machine
p = 4
r1 = 0.0806
ls = 0.0
ld = [0.0014522728, 0.0014522728]
lq = [0.0038278836, 0.0032154]
psim = [0.11171972, 0.11171972]
i1 = [80.0]
beta = [-41.1, 0]
pm = femagtools.machine.PmRelMachineLdq(3, p, psim, ld, lq, r1, beta, i1, ls)
pm.iqd_torque(170)
pm.torque_iqd(87.16183261, -71.16228193)
femagtools.machine.betai1(87.16, -71.16)
femagtools.machine.iqd(-0.685, 79.56)
import matplotlib.pylab as plt
import numpy as np
beta=np.linspace(-np.pi/2, 0)
plt.plot(180*beta/np.pi, [pm.torque_iqd(*femagtools.machine.iqd(b, 80))
for b in beta])
plt.xlabel('Beta')
plt.ylabel('Torque / Nm')
plt.grid()
f1 = 75
w1 = 2*np.pi*f1
np.linalg.norm(pm.uqd(w1, 87.16183261, -71.16228193))
Tmax = 170.0
u1 = 230.0
w1 = pm.w1_umax(u1, *pm.iqd_torque(Tmax))
print("Frequency {}".format(w1/np.pi/2))
def torque(T, pmax, wm):
    '''shaft torque as a function of rotor angular speed'''
if wm <= pmax / T:
return T
return pmax / wm
nmax = 6000/60
pmax = w1/p*Tmax
n = np.append(np.linspace(0, w1/2/np.pi/p),
np.linspace(1.1*w1/2/np.pi/p, nmax))
plt.plot(60*n, [torque(Tmax, pmax, 2*np.pi*nx) for nx in n])
plt.ylabel('Torque / Nm')
plt.xlabel('Speed / rpm')
plt.grid()
bch = femagtools.read_bchfile('LDQ-0-90.BCH')
pm = femagtools.machine.create(bch, r1 = 0, ls = 0)
char = pm.characteristics(Tmax, n=6000/60, u1max=230)
plt.plot([60*n for n in char['n']], char['T'])
plt.xlabel('Speed / rpm')
plt.ylabel('Torque / Nm')
plt.grid()
import femagtools.plot
femagtools.plot.mtpa(pm, bch.ldq['i1'][-1])
femagtools.plot.mtpa(pm, bch.ldq['i1'][-1], projection='3d')
plt.show()
import femagtools.erg
erg = femagtools.erg.read('ldlq.erg')
pm = femagtools.machine.create(erg, r1 = 0, ls = 0, lfe=350)
Tmax = 9500
nmax = 600/60
u1max = 500/np.sqrt(3)
char = pm.characteristics(Tmax, n=nmax, u1max=u1max)
n = np.array(char['n'])
fig, axs = plt.subplots(2, sharex=True)
axs[0].plot(60*n, char['T'], 'b-', label='Torque')
axs[0].set_ylabel("Torque / Nm")
axs[0].grid()
axs[0].legend(loc='upper right')
ax2 = axs[0].twinx()
ax2.plot(60*n, char['i1'], 'r-', label='Current')
ax2.set_ylabel("Current / A")
ax2.legend(loc='lower left')
axs[1].plot(60*n, char['u1'], 'b-', label='Voltage')
axs[1].set_ylabel("Voltage / V",)
axs[1].set_xlabel("Speed / rpm")
axs[1].grid()
axs[1].legend(loc='upper left')
ax3 = axs[1].twinx()
ax3.plot(60*n[1:], char['cosphi'][1:], 'r-', label='Cos Phi')
ax3.set_ylabel("Cos Phi")
ax3.legend(loc='lower right')
fig.tight_layout()
plt.show()
pm.iqd_uqd(w1,0,0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For example we can calculate the iq and id current for a given torque of 170 Nm
Step2: Or reversely
Step3: For the transformation of i1-beta and id-iq components there are corresponding functions
Step4: Show the torque vs beta curve
Step5: Calculate the amplitude of the phase voltage at frequency 75 Hz
Step6: Find the frequency from maximum torque and voltage
Step8: Plot the speed torque characteristics
Step9: Show the torque plot with the motor parameters from a Ld-Lq-Identification
Step10: Display the characteristics with a MTPA Plot
Step11: With surface
Step12: The equivalent model can be created with parameters from a ERG file as well
|
7,254
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = sorted(set(text))
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
encoded[:100]
len(vocab)
def get_batches(arr, n_seqs, n_steps):
'''Create a generator that returns batches of size
n_seqs x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
n_seqs: Batch size, the number of sequences per batch (num of seqs is different from num of batches)
n_steps: Number of sequence steps per batch
'''
# Get the number of characters per batch and number of batches we can make
characters_per_batch = n_seqs * n_steps
n_batches = len(arr) // characters_per_batch
# Keep only enough characters to make full batches
arr = arr[: n_batches * characters_per_batch]
# Reshape into n_seqs rows
arr = arr.reshape((n_seqs, n_steps * n_batches)) # arr = arr.reshape((n_seqs, -1))
for n in range(0, arr.shape[1], n_steps):
# The features
x = arr[:, n: n + n_steps]
# The targets, shifted by one
y = np.zeros_like(x)
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
yield x, y
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
x1, y1 = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
result0 = [int_to_vocab[ele] for ele in x[0, :]]
print(result0)
result1 = [int_to_vocab[ele] for ele in x[1, :]]
print(result1)
result0 = [int_to_vocab[ele] for ele in x1[0, :]]
print(result0)
result1 = [int_to_vocab[ele] for ele in x1[1, :]]
print(result1)
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(dtype=tf.int32, shape=(batch_size, num_steps), name="input")
targets = tf.placeholder(dtype=tf.int32, shape=(batch_size, num_steps), name="targets")
# Keep probability placeholder for drop out layers
    keep_prob = tf.placeholder(dtype=tf.float32, name="keep_prob")
return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
'''
def build_cell(num_units, keep_prob):
        lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
cell = tf.contrib.rnn.MultiRNNCell([build_cell(lstm_size, keep_prob) for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
lstm_output: List of output tensors from the LSTM layer
        in_size: Size of the input tensor, for example, size of the LSTM cells (which might mean L in the above instruction?)
out_size: Size of this softmax layer. The softmax output gives us a probability distribution we can use to predict the next character,
so we want this layer to have size C, the number of classes/characters we have in our text.
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# Concatenate lstm_output over axis 1 (the columns)
# lstm_output is N×M×L and after concatenation the result should be (M∗N)×L
seq_output = tf.concat(lstm_output, axis=1)
print("seq_output.shape: ", seq_output.shape)
# Reshape seq_output to a 2D tensor with lstm_size columns
x = tf.reshape(seq_output, shape=(-1, in_size))
print("x.shape: ", x.shape)
print("out_size: ", out_size)
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
# Create the weight and bias variables here
softmax_w = tf.Variable(tf.truncated_normal(shape=(in_size, out_size), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(shape=(out_size)))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name='predictions')
return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer, shape is (10000, 83)
targets: Targets for supervised learning, shape is (100, 100)
lstm_size: Number of LSTM hidden units, 512
num_classes: Number of classes in targets, 83
'''
# One-hot encode targets and reshape to match logits, one row per sequence per step
y_one_hot = tf.one_hot(indices=targets, depth=num_classes)
y_reshaped = tf.reshape(y_one_hot, shape=logits.shape)
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_reshaped, logits=logits)
loss = tf.reduce_mean(loss)
return loss
def build_optimizer(loss, learning_rate, grad_clip):
''' Build optmizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# Build the LSTM cell
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot = tf.one_hot(indices=self.inputs, depth=num_classes)
# Run each sequence step through the RNN with tf.nn.dynamic_rnn
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes) # for the record, lstm_size is the number of hidden layer
# Loss and optimizer (with gradient clipping)
self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
batch_size = 10 # Sequences per batch
num_steps = 50 # Number of sequence steps per batch
lstm_size = 128 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.01 # Learning rate
keep_prob = 0.5 # Dropout keep probability
epochs = 20
# Save every N iterations
save_every_n = 200
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in get_batches(encoded, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
end = time.time()
print('Epoch: {}/{}... '.format(e+1, epochs),
'Training Step: {}... '.format(counter),
'Training loss: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end - start)))
if (counter % save_every_n == 0):
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
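# Added quick check (illustrative values only): with top_n=2 the two least likely characters
# are zeroed out, so the sampled index below can only be 0 or 1
demo_preds = np.array([[0.5, 0.3, 0.1, 0.1]])
pick_top_n(demo_preds, 4, top_n=2)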
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: And we can see the characters encoded as integers.
Step4: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
Step5: Making training mini-batches
Step6: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step7: If you implemented get_batches correctly, the above output should look something like
Step8: LSTM Cell
Step9: RNN Output
Step10: Training loss
Step11: Optimizer
Step12: Build the network
Step13: Hyperparameters
Step14: Time for training
Step15: Saved checkpoints
Step16: Sampling
Step17: Here, pass in the path to a checkpoint and sample from the network.
|
7,255
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.special

import nengo
import nengo.utils.ensemble

def sparsity_to_x_intercept(d, p):
sign = 1
if p > 0.5:
p = 1.0 - p
sign = -1
return sign * np.sqrt(1-scipy.special.betaincinv((d-1)/2.0, 0.5, 2*p))
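# A hedged sketch (not part of the original code) of the volume-based variant described in the
# accompanying text: for points inside the hypersphere the same derivation gives d+1 in place of d-1.
def sparsity_to_x_intercept_volume(d, p):
    sign = 1
    if p > 0.5:
        p = 1.0 - p
        sign = -1
    return sign * np.sqrt(1 - scipy.special.betaincinv((d+1)/2.0, 0.5, 2*p))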
D = 32
N = 1000000
sparsity = 0.1
intercept = sparsity_to_x_intercept(D, sparsity)
model = nengo.Network()
with model:
ens = nengo.Ensemble(n_neurons=1, dimensions=D,
intercepts=[intercept])
sim = nengo.Simulator(model)
# generate samples just on the surface of the sphere
pts = nengo.dists.UniformHypersphere(surface=True).sample(N, D)
_, A = nengo.utils.ensemble.tuning_curves(ens, sim, inputs=pts)
print('Computed sparsity:', np.mean(A>0))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: One thing to note is that if we want the same thing but for volume (i.e. for representing points that are inside the hypersphere), then we can do the same derivation but using the volume formula. The only difference is that instead of d-1, you get d+1. The d+1 version of this formula is what I used for the original derivation of intercepts that lead to the CosineSimilarity(D-2) suggestion for initializing intercepts (if you want a uniform distribution of sparsity). For that derivation, see https
|
7,256
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import matplotlib.pylab as plt
import numpy as np
from distutils.version import StrictVersion
import sklearn
print(sklearn.__version__)
assert StrictVersion(sklearn.__version__ ) >= StrictVersion('0.18.1')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
assert StrictVersion(tf.__version__) >= StrictVersion('1.1.0')
import keras
print(keras.__version__)
assert StrictVersion(keras.__version__) >= StrictVersion('2.0.0')
import pandas as pd
print(pd.__version__)
assert StrictVersion(pd.__version__) >= StrictVersion('0.20.0')
from sklearn.datasets import load_iris
iris = load_iris()
iris.data[0]
print(iris.DESCR)
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
CMAP = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
pd.plotting.scatter_matrix(iris_df, c=iris.target, edgecolor='black', figsize=(15, 15), cmap=CMAP)
plt.show()
# keras.layers.Input?
from keras.layers import Input
inputs = Input(shape=(4, ))
# keras.layers.Dense?
from keras.layers import Dense
# just linear activation (like no activation function at all)
fc = Dense(3)(inputs)
from keras.models import Model
model = Model(input=inputs, output=fc)
model.summary()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# this is just random stuff, no training has taken place so far
model.predict(np.array([[ 5.1, 3.5, 1.4, 0.2]]))
inputs = Input(shape=(4, ))
fc = Dense(3)(inputs)
predictions = Dense(3, activation='softmax')(fc)
model = Model(input=inputs, output=predictions)
model.summary()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.predict(np.array([[ 5.1, 3.5, 1.4, 0.2]]))
X = np.array(iris.data)
y = np.array(iris.target)
X.shape, y.shape
y[100]
# tiny little pieces of feature engeneering
from keras.utils.np_utils import to_categorical
num_categories = 3
y = to_categorical(y, num_categories)
y[100]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42, stratify=y)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
!rm -r tf_log
tb_callback = keras.callbacks.TensorBoard(log_dir='./tf_log')
# https://keras.io/callbacks/#tensorboard
# To start tensorboard
# tensorboard --logdir=/mnt/c/Users/olive/Development/ml/tf_log
# open http://localhost:6006
%time model.fit(X_train, y_train, epochs=500, validation_split=0.2, callbacks=[tb_callback])
# %time model.fit(X_train, y_train, epochs=500, validation_split=0.2)
model.predict(np.array([[ 5.1, 3.5, 1.4, 0.2]]))
X[0], y[0]
train_loss, train_accuracy = model.evaluate(X_train, y_train)
train_loss, train_accuracy
test_loss, test_accuracy = model.evaluate(X_test, y_test)
test_loss, test_accuracy
# Keras format
model.save('nn-iris.hdf5')
import os
from keras import backend as K
K.set_learning_phase(0)
sess = K.get_session()
!rm -r tf
tf.app.flags.DEFINE_integer('model_version', 1, 'version number of the model.')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory.')
FLAGS = tf.app.flags.FLAGS
export_path_base = 'tf'
export_path = os.path.join(
tf.compat.as_bytes(export_path_base),
tf.compat.as_bytes(str(FLAGS.model_version)))
classification_inputs = tf.saved_model.utils.build_tensor_info(model.input)
classification_outputs_scores = tf.saved_model.utils.build_tensor_info(model.output)
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def
signature = predict_signature_def(inputs={'inputs': model.input},
outputs={'scores': model.output})
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
builder.add_meta_graph_and_variables(
sess,
tags=[tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
})
builder.save()
!ls -lhR tf
# cd tf
# gsutil cp -R 1 gs://irisnn
# create model and version at https://console.cloud.google.com/mlengine
# gcloud ml-engine predict --model=irisnn --json-instances=./sample_iris.json
# SCORES
# [0.9954029321670532, 0.004596732556819916, 3.3544753819114703e-07]
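# Hypothetical sketch (not from the original notebook): one way to create the sample_iris.json
# file referenced above. Each line holds one instance keyed by the exported 'inputs' signature;
# the feature values are illustrative only.
import json
with open('sample_iris.json', 'w') as f:
    f.write(json.dumps({'inputs': [5.1, 3.5, 1.4, 0.2]}) + '\n')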
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solving Iris with Neural Networks
Step2: The artificial Neuron
Step3: This is the output of all 3 hidden neurons, but what we really want is a category for iris category
Step4: Now we have likelyhoods for categories, but still our model is totally random
Step5: Evaluation
Step6: Hands-On
Step7: Export as raw tf model
Step8: This TensorFlow model can be uploaded to Google Cloud ML and called via a REST interface
|
7,257
|
<ASSISTANT_TASK:>
Python Code:
x_train,y_train,x_valid,y_valid = get_data()
x_train,x_valid = normalize_to(x_train,x_valid)
train_ds,valid_ds = Dataset(x_train, y_train),Dataset(x_valid, y_valid)
nh,bs = 50,512
c = y_train.max().item()+1
loss_func = F.cross_entropy
data = DataBunch(*get_dls(train_ds, valid_ds, bs), c)
mnist_view = view_tfm(1,28,28)
cbfs = [Recorder,
partial(AvgStatsCallback,accuracy),
CudaCallback,
partial(BatchTransformXCallback, mnist_view)]
nfs = [8,16,32,64,64]
learn,run = get_learn_run(nfs, data, 0.4, conv_layer, cbs=cbfs)
%time run.fit(2, learn)
class BatchNorm(nn.Module):
def __init__(self, nf, mom=0.1, eps=1e-5):
super().__init__()
# NB: pytorch bn mom is opposite of what you'd expect
self.mom,self.eps = mom,eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
self.register_buffer('vars', torch.ones(1,nf,1,1))
self.register_buffer('means', torch.zeros(1,nf,1,1))
def update_stats(self, x):
m = x.mean((0,2,3), keepdim=True)
v = x.var ((0,2,3), keepdim=True)
self.means.lerp_(m, self.mom)
self.vars.lerp_ (v, self.mom)
return m,v
def forward(self, x):
if self.training:
with torch.no_grad(): m,v = self.update_stats(x)
else: m,v = self.means,self.vars
x = (x-m) / (v+self.eps).sqrt()
return x*self.mults + self.adds
def conv_layer(ni, nf, ks=3, stride=2, bn=True, **kwargs):
# No bias needed if using bn
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn),
GeneralRelu(**kwargs)]
if bn: layers.append(BatchNorm(nf))
return nn.Sequential(*layers)
#export
def init_cnn_(m, f):
if isinstance(m, nn.Conv2d):
f(m.weight, a=0.1)
if getattr(m, 'bias', None) is not None: m.bias.data.zero_()
for l in m.children(): init_cnn_(l, f)
def init_cnn(m, uniform=False):
f = init.kaiming_uniform_ if uniform else init.kaiming_normal_
init_cnn_(m, f)
def get_learn_run(nfs, data, lr, layer, cbs=None, opt_func=None, uniform=False, **kwargs):
model = get_cnn_model(data, nfs, layer, **kwargs)
init_cnn(model, uniform=uniform)
return get_runner(model, data, lr=lr, cbs=cbs, opt_func=opt_func)
learn,run = get_learn_run(nfs, data, 0.9, conv_layer, cbs=cbfs)
with Hooks(learn.model, append_stats) as hooks:
run.fit(1, learn)
fig,(ax0,ax1) = plt.subplots(1,2, figsize=(10,4))
for h in hooks[:-1]:
ms,ss = h.stats
ax0.plot(ms[:10])
ax1.plot(ss[:10])
h.remove()
plt.legend(range(6));
fig,(ax0,ax1) = plt.subplots(1,2, figsize=(10,4))
for h in hooks[:-1]:
ms,ss = h.stats
ax0.plot(ms)
ax1.plot(ss)
learn,run = get_learn_run(nfs, data, 1.0, conv_layer, cbs=cbfs)
%time run.fit(3, learn)
#export
def conv_layer(ni, nf, ks=3, stride=2, bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn),
GeneralRelu(**kwargs)]
if bn: layers.append(nn.BatchNorm2d(nf, eps=1e-5, momentum=0.1))
return nn.Sequential(*layers)
learn,run = get_learn_run(nfs, data, 1., conv_layer, cbs=cbfs)
%time run.fit(3, learn)
sched = combine_scheds([0.3, 0.7], [sched_lin(0.6, 2.), sched_lin(2., 0.1)])
learn,run = get_learn_run(nfs, data, 0.9, conv_layer, cbs=cbfs
+[partial(ParamScheduler,'lr', sched)])
run.fit(8, learn)
class LayerNorm(nn.Module):
__constants__ = ['eps']
def __init__(self, eps=1e-5):
super().__init__()
self.eps = eps
self.mult = nn.Parameter(tensor(1.))
self.add = nn.Parameter(tensor(0.))
def forward(self, x):
m = x.mean((1,2,3), keepdim=True)
v = x.var ((1,2,3), keepdim=True)
x = (x-m) / ((v+self.eps).sqrt())
return x*self.mult + self.add
def conv_ln(ni, nf, ks=3, stride=2, bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=True),
GeneralRelu(**kwargs)]
if bn: layers.append(LayerNorm())
return nn.Sequential(*layers)
learn,run = get_learn_run(nfs, data, 0.8, conv_ln, cbs=cbfs)
%time run.fit(3, learn)
class InstanceNorm(nn.Module):
__constants__ = ['eps']
def __init__(self, nf, eps=1e-0):
super().__init__()
self.eps = eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
def forward(self, x):
m = x.mean((2,3), keepdim=True)
v = x.var ((2,3), keepdim=True)
res = (x-m) / ((v+self.eps).sqrt())
return res*self.mults + self.adds
def conv_in(ni, nf, ks=3, stride=2, bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=True),
GeneralRelu(**kwargs)]
if bn: layers.append(InstanceNorm(nf))
return nn.Sequential(*layers)
learn,run = get_learn_run(nfs, data, 0.1, conv_in, cbs=cbfs)
%time run.fit(3, learn)
data = DataBunch(*get_dls(train_ds, valid_ds, 2), c)
def conv_layer(ni, nf, ks=3, stride=2, bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn),
GeneralRelu(**kwargs)]
if bn: layers.append(nn.BatchNorm2d(nf, eps=1e-5, momentum=0.1))
return nn.Sequential(*layers)
learn,run = get_learn_run(nfs, data, 0.4, conv_layer, cbs=cbfs)
%time run.fit(1, learn)
class RunningBatchNorm(nn.Module):
def __init__(self, nf, mom=0.1, eps=1e-5):
super().__init__()
self.mom,self.eps = mom,eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
self.register_buffer('sums', torch.zeros(1,nf,1,1))
self.register_buffer('sqrs', torch.zeros(1,nf,1,1))
self.register_buffer('batch', tensor(0.))
self.register_buffer('count', tensor(0.))
self.register_buffer('step', tensor(0.))
self.register_buffer('dbias', tensor(0.))
def update_stats(self, x):
bs,nc,*_ = x.shape
self.sums.detach_()
self.sqrs.detach_()
dims = (0,2,3)
s = x.sum(dims, keepdim=True)
ss = (x*x).sum(dims, keepdim=True)
c = self.count.new_tensor(x.numel()/nc)
mom1 = 1 - (1-self.mom)/math.sqrt(bs-1)
self.mom1 = self.dbias.new_tensor(mom1)
self.sums.lerp_(s, self.mom1)
self.sqrs.lerp_(ss, self.mom1)
self.count.lerp_(c, self.mom1)
self.dbias = self.dbias*(1-self.mom1) + self.mom1
self.batch += bs
self.step += 1
def forward(self, x):
if self.training: self.update_stats(x)
sums = self.sums
sqrs = self.sqrs
c = self.count
if self.step<100:
sums = sums / self.dbias
sqrs = sqrs / self.dbias
c = c / self.dbias
means = sums/c
vars = (sqrs/c).sub_(means*means)
if bool(self.batch < 20): vars.clamp_min_(0.01)
x = (x-means).div_((vars.add_(self.eps)).sqrt())
return x.mul_(self.mults).add_(self.adds)
def conv_rbn(ni, nf, ks=3, stride=2, bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn),
GeneralRelu(**kwargs)]
if bn: layers.append(RunningBatchNorm(nf))
return nn.Sequential(*layers)
learn,run = get_learn_run(nfs, data, 0.4, conv_rbn, cbs=cbfs)
%time run.fit(1, learn)
data = DataBunch(*get_dls(train_ds, valid_ds, 32), c)
learn,run = get_learn_run(nfs, data, 0.9, conv_rbn, cbs=cbfs
+[partial(ParamScheduler,'lr', sched_lin(1., 0.2))])
%time run.fit(1, learn)
nb_auto_export()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Batchnorm
Step2: We can then use it in training and see how it helps keep the activations means to 0 and the std to 1.
Step3: Builtin batchnorm
Step4: With scheduler
Step5: More norms
Step6: Thought experiment
Step7: Question
Step8: Running Batch Norm
Step9: This solves the small batch size issue!
Step10: Export
|
7,258
|
<ASSISTANT_TASK:>
Python Code:
from collections import OrderedDict
d = OrderedDict()
d['foo'] = 1
d['bar'] = 2
d['spam'] = 3
d['grok'] = 4
# Outputs "foo 1", "bar 2", "spam 3", "grok 4"
for key in d:
print(key, d[key])
import json
json.dumps(d)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: An OrderedDict can be particularly useful when you want to build a mapping that you may want to later serialize or encode into a different format. For example, if you want to precisely control the order of fields appearing in a JSON encoding, first building the data in an OrderedDict will do the trick
|
7,259
|
<ASSISTANT_TASK:>
Python Code:
pickle_dir = '../pickle_files/'
odds_file = 'odds.pkl'
matches_file = 'matches.pkl'
import numpy as np # numerical libraries
import scipy as sp
import pandas as pd # for data analysis
import pandas.io.sql as sql # for interfacing with MySQL database
from scipy import linalg # linear algebra libraries
from scipy import optimize
import matplotlib as mpl # a big library with plotting functionality
import matplotlib.pyplot as plt # a subset of matplotlib with most of the useful tools
import IPython as IP
%matplotlib inline
import pdb # for debugging
from sklearn import linear_model as lm
#%qtconsole
odds= pd.read_pickle(pickle_dir + odds_file)
odds = odds.dropna(axis=0,subset=['PSW','PSL'])
matches= pd.read_pickle(pickle_dir + matches_file)
matches = matches.dropna(axis=0,subset=['winner_rank_points','loser_rank_points'])
data = pd.merge(matches,odds[['PSW','PSL','key']],how='inner',on='key')
IP.display.display(data[0:3])
data['year'] = data['tourney_date'].map(lambda x: x.year)
training = data[data.year.isin([2010,2011,2012])]
validation = data[data.year.isin([2013,2014])]
test = data[data.year.isin([2015,2016])]
# winner rank minus loser rank
rank_diff = (training['winner_rank_points'] - training['loser_rank_points']).values
# index variable: True if higher ranked player won, False otherwise
y = (rank_diff > 0)
# final training set: a single column holding the absolute difference in ranking points
X = np.abs(rank_diff)
# for numerical well-behavedness, we need to scale and center the training
#X1=(X-np.mean(X,0))/np.std(X,axis=0)
X=X/np.std(X,axis=0)
def sigmoid(z):
'''
Usage: sigmoid(z)
Description: Computes value of sigmoid function for scalar.
For vector or matrix, computes values of sigmoid function for each entry.
'''
return 1/(1+np.exp(-z));
# define a cost function
def costFunction(theta,X,y,lam,offset=True):
'''
Computes the cost and gradient for logistic regression.
Input:
theta (mx1 numpy array of parameters)
X (nxm numpy array of feature values, first column all 1s)
y (nx1 boolean array of outcomes, 1=higher ranked player won, 0 otherwise)
lam (scalar: regularization paramter)
offset (bool: True of first element of theta represents a translation, False otherwise)
Output:
cost (scalar value of cost)
'''
# number of data points
n = len(y)
#number of parameters
m = len(theta)
# make sure vectors are column vectors for use of "np.dot"
theta = theta.reshape(-1,1)
y = y.reshape(-1,1)
X = X.reshape(-1,1) if m==1 else X
# input to sigmoid function will be a column vector
z = np.dot(X,theta) if len(theta) > 1 else X*theta
# cost function
regterms = np.arange(1,m) if offset else np.arange(0,m)
#Tracer()()
J = (1/n)*(-np.dot(y.transpose(),np.log(sigmoid(z))) - \
np.dot((1-y.transpose()),np.log(1-sigmoid(z))) + \
(lam/(2))*np.sum(theta[regterms]**2))
# gradient
reggrad = np.insert(theta[regterms],0,0)
grad = (1/n)*np.sum((sigmoid(z) - y)*X,0) + (lam/n)*reggrad
return np.squeeze(J), np.squeeze(grad)
# check that cost function works
theta = np.array([1.0])
lam = 0
cost, grad = costFunction(theta, X, y*1,lam)
print("cost:", cost)
print("grad:", grad)
def callbackF(theta):
global NFeval
global X
global y
global lam
cost,grad = costFunction(theta,X,y*1,lam)
print("%4d %3.6f %3.6f %3.6f" % \
(NFeval, theta, cost, grad))
NFeval+=1
NFeval = 1
initial_theta = np.array([.1])
print("iter theta cost grad ")
res = sp.optimize.minimize(lambda t: costFunction(t,X,y*1,lam), initial_theta, method='CG',\
jac=True,options={'maxiter':100,'disp':True},callback=callbackF)
# define figure and axes
fig = plt.figure(figsize=(15,5))
ax0 = fig.add_subplot(131)
ax1 = fig.add_subplot(132)
ax2 = fig.add_subplot(133)
# figure A: predicted probabilities vs. empirical probs
hist, bin_edges = np.histogram(X,bins=100)
p = [np.sum(y[np.where((X>=bin_edges[i]) & (X<bin_edges[i+1]))[0]])/np.max([hist[i],1]) for i in np.arange(len(bin_edges)-1)]
bar_pos = np.arange(len(p))
bar_width = np.diff(bin_edges)
ax0.bar(bin_edges[0:-1], p, width=bar_width, align='edge', alpha=0.5)
r = np.arange(X.min(),X.max(),.1)
s = 1/(1+np.exp(-res.x*r))
ax0.plot(r,s,'r')
ax0.set_xlabel('Scaled rank difference',fontsize=12)
ax0.set_ylabel('Probability that higher ranked wins',fontsize=12)
ax0.set_title('Logistic fit to empirical probabilities',fontsize=12)
ax0.legend(['Logistic probability curve','Empirical probability hist.'])
# figure B: probabilities predicted by odds market
ProbW = 1/training.PSW
ProbL = 1/training.PSL
idx = (training.winner_rank_points>training.loser_rank_points)
odds_prob=np.where(idx,ProbW,ProbL)
t = pd.DataFrame({'X':X,'odds_prob':odds_prob})
ts = t.sort_values('X')
ax1.plot(ts['X'],ts['odds_prob'],'.b')
ax1.plot(r,s,'r')
ax1.set_xlabel('Scaled rank difference',fontsize=12)
ax1.set_ylabel('Probability higher ranked wins',fontsize=12)
ax1.set_title('Probabilities implied by odds market.',fontsize=12)
ax1.legend(['Odds market probabilities','Logistic probability curve'])
# Fig C: variance in odds probabilities as a function of rank difference
x_odds = ts['X'].values.reshape(len(ts),-1)
y_odds = ts['odds_prob'].values
hist, bin_edges = np.histogram(x_odds,bins=10)
stds = [np.std(y_odds[np.where((X>=bin_edges[i]) & (X<bin_edges[i+1]))]) for i in np.arange(len(bin_edges)-1)]
reg = lm.LinearRegression()
reg.fit (bin_edges[0:-1].reshape(10,1),stds)
yv=reg.predict(bin_edges[0:-1].reshape(10,1))
ax2.plot(bin_edges[0:-1],stds,'*b')
ax2.plot(bin_edges[0:-1],yv,'r')
ax2.set_xlabel('Scaled rank difference',fontsize=12)
ax2.set_ylabel('Stdev of market prob.',fontsize=12)
ax2.set_title('Trends in stdev of implied probabilities',fontsize=12)
ax2.legend(['Stdev of binned market-probs.','Regression line'])
lr = lm.LogisticRegression(C=1., solver='lbfgs')
lr.fit(X.reshape(len(X),-1),y*1)
cofs = lr.coef_[0]
print("Regression coefficient from DIY approach: {}".format(res.x))
print("Regression coefficient from Scikit approach: {}".format(cofs[0]))
def calculate_daily_balances(probs,odds,strategy,b=1):
    '''
    Calculates net-worth time-series for a given set of data, a fixed strategy, and a
    value of the regression coefficient
    '''
nmatches = len(probs)
investment = nmatches
bet_idx = get_bet_idx(probs,odds,strategy)
if strategy in [1,2]:
stakes = np.abs(bet_idx) # in strategies 1 and 2, bet $1 every time
else: # in strategy 3,size of bet varies with prob
stakes = np.abs(bet_idx)*(probs*(b+1)-1)/b
gains = stakes*(bet_idx>0)*odds.PSW.values
daily_movement = gains-stakes
daily_balances = investment*np.ones(nmatches)
#pdb.set_trace()
for i in np.arange(1,len(validation)):
daily_balances[i]=daily_balances[i-1]+daily_movement[i]
return daily_balances
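# Added worked example of the strategy-3 stake rule used above (arbitrary demo values):
# with b = 0.25 a model probability of 0.9 stakes (0.9*1.25 - 1)/0.25 = 0.5, and 0.8 stakes 0.
b_demo, p_demo = 0.25, np.array([0.9, 0.8])
(p_demo*(b_demo + 1) - 1)/b_demo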
def get_bet_idx(probs,odds,strategy):
    '''
    calculates whether or not a bet is placed, and if so, on whom.
    returns 1 for "winner", -1 for "loser", and 0 for "no bet"
    '''
y = (rank_diff > 0) # 1 if higher ranked player won, -1 otherwise
#strategy 1: always bet on higher ranked player
if strategy==1:
return y*2-1
# strategies 2 and 3: bet on the higher ranked player if prob high
if strategy in [2,3]:
ProbW = 1/odds.PSW
ProbL = 1/odds.PSL
compProbs = np.where(y,ProbW,ProbL)
bet_placed_idx = (probs>compProbs)
return (y*2-1)*bet_placed_idx
# winner rank minus loser rank
rank_diff = (validation['winner_rank_points'] - validation['loser_rank_points']).values
# index variable: True if higher ranked player won, False otherwise
y = (rank_diff > 0)
# final validation set: a single column holding the absolute difference in ranking points
X = np.abs(rank_diff)
# for numerical well-behavedness, we need to scale and center the validation
#X1=(X-np.mean(X,0))/np.std(X,axis=0)
X=X/np.std(X,axis=0)
probs = sigmoid(res.x*X)
odds = validation[['PSW','PSL']]
# loop over betting strategies
plotdata = {}
b = .25 # parameter for strategy #3
for i in np.arange(1,4):
key='r'+str(i)
value = calculate_daily_balances(probs,odds,i,b)
plotdata[key]=value
plot_bnd = len(validation)
fig = plt.figure(figsize=(15,5))
ax = fig.add_subplot(111)
x = np.arange(len(validation))
tick = np.ones(len(validation))
for i in np.arange(1,4):
ax.plot(x[0:plot_bnd],plotdata['r'+str(i)])
ax.legend(['Strategy 1','Strategy 2','Strategy 3'])
ax.plot(x[0:plot_bnd],len(validation)*tick[0:plot_bnd],'--r')
# # extract from MySQL database info on rank points and height for both winner and loser, store in dataframe
# with engine.begin() as connection:
# rawdata = pd.read_sql_query(SELECT winner_rank_points, loser_rank_points, winner_ht, loser_ht FROM matches \
# WHERE tourney_date < '20150101' \
# AND tourney_date > '20110101'
# AND winner_rank_points IS NOT NULL \
# AND loser_rank_points IS NOT NULL \
# AND winner_ht IS NOT NULL \
# AND loser_ht IS NOT NULL, connection)
rawdata = matches[['winner_rank_points','loser_rank_points','winner_ht','loser_ht']].dropna(how='any')
# winner rank minus loser rank
rank_diff = (rawdata['winner_rank_points'] - rawdata['loser_rank_points']).values
# winner height minus loser height
ht_diff = (rawdata['winner_ht']-rawdata['loser_ht']).values
# index variable: True if higher ranked player won, False otherwise
y = (rank_diff > 0)
# higher ranked height minus lower ranked height
rank_ht_diff = np.where(y==True, ht_diff,-ht_diff)
# final dataset with two cols: difference in rankings, high ranked height minus low ranked height
X = np.column_stack([np.abs(rank_diff), rank_ht_diff])
# for numerical well-behavedness, we need to scale and center the data
X=(X-np.nanmean(X,axis=0))/np.nanstd(X,axis=0)
# prepend column of 1s to X
X=np.insert(X,0,1,axis=1)
# number of "True" minus the number of "False"
nsamps = sum(y) - sum(~y)
# the smaller class can be associated with either "True" or "False"
smallclass = bool(nsamps<0)
# sample with replacement from data of the appropriate class
samps = X[np.random.choice(np.where(y==smallclass)[0],size=nsamps),:]
# augment the old data
X1 = np.concatenate((X,samps),axis=0)
y1 = np.concatenate((y,np.zeros(nsamps,dtype=bool)))
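# Added sanity check (assumes, as holds for this data, that the minority class is "False"):
# after oversampling, both classes should contain the same number of rows
print(sum(y1), sum(~y1))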
# plot the normalized data
fig, ax = plt.subplots(1,1)
ax.plot(X1[y1,1],X1[y1,2],"ro")
ax.plot(X1[~y1,1],X1[~y1,2],"bo")
ax.set_xlabel('Rank difference')
ax.set_ylabel('Height')
ax.set_title('Higher-rank-wins as a function of rank difference and height')
ax.legend(['High rank wins','Low rank wins'])
def sigmoid(z):
'''
Usage: sigmoid(z)
Description: Computes value of sigmoid function for scalar.
For vector or matrix, computes values of sigmoid function for each entry.
'''
return 1/(1+np.exp(-z));
# define a cost function
def costFunction(theta,X,y,lam):
'''
Computes the cost and gradient for logistic regression.
Input:
theta (3x1 vector of parameters)
X (nx3 matrix of feature values, first column all 1s)
y (nx1 binary vector of outcomes, 1=higher ranked player won, 0 otherwise)
lam (scalar: regularization paramter)
Output:
cost (scalar value of cost)
'''
# number of data points
m = len(y)
# make sure vectors are column vectors
theta = theta.reshape(-1,1)
y = y.reshape(-1,1)
# input to sigmoid function will be a column vector
z = np.dot(X,theta)
# cost function
J = (1/m)*np.sum(np.dot(-y.transpose(),np.log(sigmoid(z))) - \
np.dot((1-y.transpose()),np.log(1-sigmoid(z)))) + \
(lam/(2*m))*np.sum(theta[1:len(theta)+1]**2);
# gradient
regterm = np.insert(theta[1:len(theta)+1],0,0)
grad = (1/m)*np.sum((sigmoid(z) - y)*X,0) + (lam/m)*regterm
return J, grad
# check that cost function works
theta = np.array([1,2,3])
lam = 0
cost, grad = costFunction(theta, X1, y1*1,lam)
print("cost:", cost)
print("grad:", grad)
def callbackF(theta):
global NFeval
global X1
global y1
global lam
cost,grad = costFunction(theta,X1,y1*1,lam)
print("%4d %3.6f %3.6f %3.6f %3.6f %3.6f %3.6f %3.6f" % \
(NFeval, theta[0], theta[1], theta[2], cost, grad[0], grad[1], grad[2]))
NFeval+=1
# run optimization
NFeval = 1
initial_theta = np.array([-5,4,3])
#initial_theta = np.array([1])
#print("iter t1 t2 t3 cost grad1 grad2 grad3")
#res = sp.optimize.minimize(lambda t: costFunction(t,X1,y1*1,lam), initial_theta, method='CG',\
# jac=True,options={'maxiter':100,'disp':True}, callback=callbackF)
res = sp.optimize.minimize(lambda t: costFunction(t,X1,y1*1,lam), initial_theta, method='CG',\
jac=True,options={'maxiter':100,'disp':True})
# plot the normalized data with regression line
theta = res.x
fig, ax = plt.subplots(1,1)
ax.plot(X1[y1,1],X1[y1,2],"ro",markerfacecolor="None")
ax.plot(X1[~y1,1],X1[~y1,2],"bo",markerfacecolor="None")
xplot = np.array([-3,3])
yplot = (-1/theta[2])*(theta[1]*xplot+theta[0])
ax.plot(xplot,yplot,'g',linewidth=2)
ax.set_xlabel('Rank difference')
ax.set_ylabel('Height')
ax.set_title('Higher-rank-wins as a function of age and height')
ax.set_ylim((-5,5))
rank_success_rate = np.sum(y)/len(y)
y_pred = (np.dot(X,theta)>0)
prediction_success_rate = np.sum(~(y^y_pred))/len(y)
print("prediction success rate: ", prediction_success_rate)
print("rank success rate: ", rank_success_rate)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(C=1., solver='lbfgs')
t = lr.fit(X1,y1*1)
y=t.predict(X)
cofs = t.coef_[0]
yplot = (-1/cofs[2])*(cofs[1]*xplot+cofs[0])
fig, ax = plt.subplots(1,1)
ax.plot(X1[y1,1],X1[y1,2],"ro",markerfacecolor="None")
ax.plot(X1[~y1,1],X1[~y1,2],"bo",markerfacecolor="None")
plt.plot(xplot,yplot)
ax.set_ylim([-5,5])
# we'll use the SVM package in the scikit library
from sklearn import svm
# produce a dense grid of points in rectangle around the data
def make_meshgrid(x, y, h=.02):
    '''
    Create a mesh of points to plot in

    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional

    Returns
    -------
    xx, yy : ndarray
    '''
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
# produce a contour plot with predicted outcomes from SVM classifier
def plot_contours(ax, clf, xx, yy, **params):
    '''
    Plot the decision boundaries for a classifier.

    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    '''
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
# # extract from MySQL database info on rank points and height for both winner and loser, store in dataframe
# with engine.begin() as connection:
# rawdata = pd.read_sql_query(SELECT winner_rank_points, loser_rank_points, winner_age, loser_age, winner_ht, loser_ht \
# FROM matches \
# WHERE tourney_date > '20170101' \
# AND winner_rank_points IS NOT NULL \
# AND loser_rank_points IS NOT NULL \
# AND winner_age IS NOT NULL \
# AND loser_age IS NOT NULL \
# AND winner_ht IS NOT NULL \
# AND loser_ht IS NOT NULL, connection)
# rebuild rawdata in pandas with the columns the commented query above would have returned
# (the tourney_date filter from that query is omitted here for simplicity)
rawdata = matches[['winner_rank_points','loser_rank_points','winner_age','loser_age','winner_ht','loser_ht']].dropna(how='any')
# this nx2 array contains the differences in ages and the differences in height
X = pd.concat([rawdata.iloc[:,2]-rawdata.iloc[:,3], \
rawdata.iloc[:,4]-rawdata.iloc[:,5]], axis=1).values
# this nx1 binary array indicates whether the match was a "success" or a "failure", as predicted by ranking differences
y = (rawdata.iloc[:,0]-rawdata.iloc[:,1]).values > 0
# for numerical well-behavedness, we need to scale and center the data
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
# plot the normalized data
fig, ax = plt.subplots(1,1)
ax.plot(X[y,0],X[y,1],"ro")
ax.plot(X[~y,0],X[~y,1],"bo")
ax.set_xlabel('Age')
ax.set_ylabel('Height')
ax.set_title('Higher-rank-wins as a function of age and height')
# find the SVM classifier
clf = svm.SVC()
clf.fit(X, y)
# generate a dense grid for producing a contour plot
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
# feed the grid into the plot_contours routinge
fig, ax = plt.subplots(1, 1)
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel('Rank points')
ax.set_ylabel('First serve %')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title('SVM classifier for height/age data')
odds= pd.read_pickle('../data/pickle_files/odds.pkl')
matches= pd.read_pickle('../data/pickle_files/matches.pkl')
rawdata = pd.merge(matches,odds[['PSW','PSL','key_o']].dropna(axis=0,subset=["PSW"]),how='inner',on='key_o')
%qtconsole
investment = len(rawdata)
good_call_idx = (rawdata["LRank"]-rawdata["WRank"]>0)
winner_odds = rawdata["PSW"]
gain = sum(winner_odds*good_call_idx)
roi = gain - investment
print("total invested: ", investment)
print("return on investment: ", roi)
investment = len(rawdata)
good_call_idx = (rawdata["PSW"]-rawdata["PSL"]>0)
winner_odds = rawdata["PSW"]
gain = np.dot(winner_odds,good_call_idx)
roi = gain - investment
print("total invested: ", investment)
print("return on investment: ", roi)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I. <a name="logisticregression_1d"> One-dimensional Logistic regression
Step2: Load the data.
Step3: Separate data into training, validation, and test sets. (This division is not used for the plot above, but will be critical in assessing the performance of our learning algorithms.)
Step4: Defining "success" and "failure"
Step5: Define helper functions
Step6: The cost function is designed to take a regularization parameter lambda. For a non-regularized solution, lambda can be set equal to 0. The cost function returns both a cost and the gradient for any given value of parameters $\theta$.
Step7: Small test
Step8: For diagnostic purposes, we define a callback function that will print information about the state and gradient as the optimization algorithm proceeds.
Step9: Optimize
Step10: Plot the fit
Step11: Double check
Step14: Assessment
Step16: II. <a name="logisticregression_nd"> N-dimensional Logistic regression
Step17: Balance the numbers of correct and incorrect predictions via oversampling.
Step18: To perform the regression, we'll need to define the sigmoid function and a cost function. The former can take a scalar, vector, or matrix, and return the elementwise value of
Step19: The cost function is designed to take a regularization parameter lambda. For a non-regularized solution, lambda can be set equal to 0. The cost function returns both a cost and the gradient for any given value of parameters $\theta$.
Step20: Small test
Step21: For diagnostic purposes, we define a callback function that will print information about the state and gradient as the optimization algorithm proceeds.
Step22: Finally, we run the optimization.
Step23: To see how it did, we replot the data with the logistic classifier superimposed over the top.
Step24: Also develop a quantitative measure of success
Step25: Try to duplicate the above with sklearn's canned logistic algorithm
Step26: Comments
Step29: To visualize the SVM classifier, we'll need some helper functions to form contour plots. These helper functions are borrowed from the scikit-learn documentation, http
Step31: We'll run our test on a slightly different set of data than last time. Here, we'll still classify matches as 1 if the higher ranked player wins and 0 otherwise, but we'll focus on age and height as our predictive features.
Step32: We'll use the scikit svm package to train an SVM classifier on this data. We'll plot the results as a contour graph.
Step33: Note that these features produce a fairly mixed set of points, so they are unlikely to be highly predictive. But it is interesting to note the pocket of blue in the lower left corner
Step34: As a point of departure, consider a betting strategy of betting one dollar on each match, where you bet on the higher-ranked player.
Step35: Alternatively, we could follow the same strategy, but bet on whichever player had the higher odds
|
7,260
|
<ASSISTANT_TASK:>
Python Code:
import networkx as nx
# Your code here
# Your code here
import numpy as np
class ExplodingGame(object):
def __init__(self, N):
self.N = N
# state = (player, number)
def start(self):
return (+1, 1)
def actions(self, state):
player, number = state
return ['+', '*']
def succ(self, state, action):
player, number = state
if action == '+':
return (-player, number + 2)
elif action == '*':
return (-player, np.ceil(number * 1.1))
assert False
def is_end(self, state):
player, number = state
return number > self.N
def utility(self, state):
player, number = state
assert self.is_end(state)
return player * float('inf')
def player(self, state):
player, number = state
return player
def add_policy(game, state):
action = '+'
print(f"add policy: state {state} => action {action}")
return action
def multiply_policy(game, state):
action = '*'
print(f"multiply policy: state {state} => action {action}")
return action
def random_policy(game, state):
pass
def minimax_policy(game, state):
pass
def expectimax_policy(game, state):
pass
# Test case
game = ExplodingGame(N=10)
policies = {
+1: add_policy,
-1: multiply_policy
}
state = game.start()
while not game.is_end(state):
# Who controls this state?
player = game.player(state)
policy = policies[player]
# Ask policy to make a move
action = policy(game, state)
# Advance state
state = game.succ(state, action)
print(f"Utility di akhir permainan {game.utility(state)}")
!pip install pomegranate
from pomegranate import *
observed = [2,3,3,2,3,2,3,2,2,3,1,3,3,1,1,1,2,1,1,1,3,1,2,1,1,1,2,3,3,2,3,2,2]
# Your code here
# Your code here
# Your code here
# Your code here
# Your code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1.2.a (2 points)
Step2: Problem 1.3 (2 points)
Step3: Problem 2.1 (2 points)
Step4: Problem 2.2 (3 points)
Step5: Problem 2.3 (2 points)
Step6: Problem 2.4 (3 points)
Step7: Problem 3.1 (2 points)
Step8: Problem 3.2 (2 points)
Step9: Problem 3.3 (2 points)
Step10: Problem 3.4 (2 points)
Step11: Problem 3.5 (2 points)
|
7,261
|
<ASSISTANT_TASK:>
Python Code:
from logbook import INFO, WARNING, DEBUG
import warnings
warnings.filterwarnings("ignore") # suppress h5py deprecation warning
import numpy as np
import os
import backtrader as bt
from btgym.research.casual_conv.strategy import CasualConvStrategyMulti
from btgym.research.casual_conv.networks import conv_1d_casual_attention_encoder
from btgym.algorithms.policy import StackedLstmPolicy
from btgym.algorithms.launcher.base import Launcher
from btgym.algorithms.aac import A3C
from btgym import PortfolioEnv
from btgym.datafeed.casual import BTgymCasualDataDomain
from btgym.datafeed.multi import BTgymMultiData
engine = bt.Cerebro()
num_features = 16
engine.addstrategy(
CasualConvStrategyMulti,
cash_name='EUR',
start_cash=2000,
commission=0.0001,
leverage=10.0,
asset_names={'USD', 'CHF', 'GBP', 'JPY'},
drawdown_call=10, # max % to lose, in percent of initial cash
target_call=10, # max % to win, same
skip_frame=10,
gamma=0.99,
state_ext_scale = {
'USD': np.linspace(1, 2, num=num_features),
'GBP': np.linspace(1, 2, num=num_features),
'CHF': np.linspace(1, 2, num=num_features),
'JPY': np.linspace(5e-3, 1e-2, num=num_features),
},
cwt_signal_scale=4e3,
cwt_lower_bound=4.0, # CWT scales TODO: 8.? (was : 3.)
cwt_upper_bound=90.0,
reward_scale=7,
)
data_config = {
'USD': {'filename': './data/DAT_ASCII_EURUSD_M1_2017.csv'},
'GBP': {'filename': './data/DAT_ASCII_EURGBP_M1_2017.csv'},
'JPY': {'filename': './data/DAT_ASCII_EURJPY_M1_2017.csv'},
'CHF': {'filename': './data/DAT_ASCII_EURCHF_M1_2017.csv'},
}
dataset = BTgymMultiData(
data_class_ref=BTgymCasualDataDomain,
data_config=data_config,
trial_params=dict(
start_weekdays={0, 1, 2, 3, 4, 5, 6},
sample_duration={'days': 30, 'hours': 0, 'minutes': 0},
start_00=False,
time_gap={'days': 15, 'hours': 0},
test_period={'days': 7, 'hours': 0, 'minutes': 0},
expanding=True,
),
episode_params=dict(
start_weekdays={0, 1, 2, 3, 4, 5, 6},
sample_duration={'days': 2, 'hours': 23, 'minutes': 55},
start_00=False,
time_gap={'days': 2, 'hours': 15},
),
frozen_time_split={'year': 2017, 'month': 3, 'day': 1},
)
#########################
env_config = dict(
class_ref=PortfolioEnv,
kwargs=dict(
dataset=dataset,
engine=engine,
render_modes=['episode'],
render_state_as_image=True,
render_size_episode=(12,16),
render_size_human=(9, 4),
render_size_state=(11, 3),
render_dpi=75,
port=5000,
data_port=4999,
connect_timeout=90,
verbose=0,
)
)
cluster_config = dict(
host='127.0.0.1',
port=12230,
num_workers=4, # set according to the number of CPUs available
num_ps=1,
num_envs=1,
log_dir=os.path.expanduser('~/tmp/multi_continiuous'),
)
policy_config = dict(
class_ref=StackedLstmPolicy,
kwargs={
'action_dp_alpha': 200, # Dirichlet process sampling inverse variance
'lstm_layers': (256, 256),
'dropout_keep_prob': 1.0,
'encode_internal_state': False,
'conv_1d_num_filters': 64,
'share_encoder_params': False,
'state_encoder_class_ref': conv_1d_casual_attention_encoder,
}
)
trainer_config = dict(
class_ref=A3C,
kwargs=dict(
opt_learn_rate=1e-4,
opt_end_learn_rate=1e-5,
opt_decay_steps=50*10**6,
model_gamma=0.99,
model_gae_lambda=1.0,
model_beta=0.001, # entropy reg
rollout_length=20,
time_flat=True,
model_summary_freq=10,
episode_summary_freq=1,
env_render_freq=5,
)
)
env = PortfolioEnv(**env_config['kwargs'])
o = env.reset()
# Should manually normalize actions as we see it as sum of assets shares (or prob. distribution),
# but do not pose this constraint to action_space:
a = env.action_space.sample()
norm = np.asarray(list(a.values())).sum()
a_norm = {k: v/norm for k, v in a.items()}
for k, v in a_norm.items(): print('{}: {:1.2f}'.format(k, v[0]))
# NOTE: to see actual order execution feedback via `info` part one need to set strategy param: skip_frame=2,
o, r, d, i = env.step(a_norm)
for k, v in i[0].items(): print('{}: {}'.format(k, v))
env.close()
launcher = Launcher(
cluster_config=cluster_config,
env_config=env_config,
trainer_config=trainer_config,
policy_config=policy_config,
test_mode=False,
max_env_steps=100*10**6,
root_random_seed=0,
purge_previous=1, # ask to override previously saved model and logs
verbose=0
)
# Train it:
launcher.run()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem formulation
Step2: First, one can manually play with the environment
Step3: Run training (do not expect it to converge though)
|
7,262
|
<ASSISTANT_TASK:>
Python Code:
import numpy
n = 10
A = numpy.random.random(n)
print(A)
s = 0
for i in range(n):
s += A[i]
print(s)
s = numpy.sum(A)
print(s)
n = 1000000
A = numpy.random.random(n)
def explicit_sum(seq):
s = 0
for elem in seq:
s += elem ** 2
return s
%timeit explicit_sum(A)
%timeit numpy.sum(A**2)
### Cython
%load_ext Cython
%%cython
cimport numpy as np
cimport cython
@cython.boundscheck(False)
@cython.wraparound(False)
def cython_sum(np.ndarray[double, ndim=1] A):
cdef double s = 0
cdef int i, n
n = len(A)
for i in range(n):
s += A[i] * A[i]
return s
%timeit cython_sum(A)
%%file pythran_sum.py
#pythran export pythran_sum(float64[])
import numpy
def pythran_sum(A):
return numpy.sum(A**2)
!python -m pythran.run pythran_sum.py -DUSE_BOOST_SIMD -O3 -march=native
!python -m timeit -s 'from pythran_sum import pythran_sum; import numpy; n = 1000000 ; A = numpy.random.random(n)' 'pythran_sum(A)'
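# Illustrative aside (not in the original benchmark): the same kernel with Numba,
# assuming the numba package is installed, as one more point of comparison.
from numba import njit

@njit
def numba_sum(A):
    s = 0.0
    for i in range(A.shape[0]):
        s += A[i] * A[i]
    return s

numba_sum(A)          # first call triggers compilation
%timeit numba_sum(A)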
n, m = 100, 200
A = numpy.random.random((n,m))
s = 0.
for i in range(1, n-1):
for j in range(1, m-1):
s += A[i,j]
print(s)
print(numpy.sum(A[1:-1, 1:-1]))
s = 0
for i in range(n):
for j in range(m):
if A[i,j] < .5:
s += 1
print(s)
print(numpy.sum(A < .5))
s = 0
B = numpy.empty_like(A)
for i in range(n):
for j in range(m):
if A[i,j] < .5:
B[i,j] = A[i,j]
else:
B[i,j] = 0.
print(B)
print(numpy.where(A<.5,A,0.))
### Pitfall!
n = 100
B = numpy.arange(100)
shift = 3
for i in range(shift, n):
B[i] = 1 + B[i - shift]
print(B)
B = numpy.arange(100)
B[shift:] = 1 + B[:-shift]
print(B)
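# Illustrative check (not in the original): the two versions really do differ, because the
# loop reads values it has already updated while the sliced version reads the originals.
B_loop = numpy.arange(100)
for i in range(shift, 100):
    B_loop[i] = 1 + B_loop[i - shift]
B_vec = numpy.arange(100)
B_vec[shift:] = 1 + B_vec[:-shift]
print(numpy.array_equal(B_loop, B_vec))  # False: this recurrence cannot be vectorized naively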
%%file grayscott.py
#pythran export GrayScott(int, float, float, float, float)
import numpy as np
def GrayScott(counts, Du, Dv, F, k):
n = 300
U = np.zeros((n+2,n+2), dtype=np.float32)
V = np.zeros((n+2,n+2), dtype=np.float32)
u, v = U[1:-1,1:-1], V[1:-1,1:-1]
r = 20
u[:] = 1.0
U[n//2-r:n//2+r, n//2-r:n//2+r] = 0.50
V[n//2-r:n//2+r, n//2-r:n//2+r] = 0.25
u += 0.15*np.random.random((n,n))
v += 0.15*np.random.random((n,n))
for i in range(counts):
Lu = ( U[0:-2,1:-1] +
U[1:-1,0:-2] - 4*U[1:-1,1:-1] + U[1:-1,2:] +
U[2: ,1:-1] )
Lv = ( V[0:-2,1:-1] +
V[1:-1,0:-2] - 4*V[1:-1,1:-1] + V[1:-1,2:] +
V[2: ,1:-1] )
uvv = u*v*v
u += Du*Lu - uvv + F*(1 - u)
v += Dv*Lv + uvv - (F + k)*v
return V
from grayscott import GrayScott
%timeit GrayScott(40, 0.16, 0.08, 0.04, 0.06)
%%cython
cimport cython
import numpy as np
cimport numpy as np
cpdef CythonGrayScott(int counts, double Du, double Dv, double F, double k):
cdef int n = 300
cdef np.ndarray U = np.zeros((n+2,n+2), dtype=np.float_)
cdef np.ndarray V = np.zeros((n+2,n+2), dtype=np.float_)
cdef np.ndarray u = U[1:-1,1:-1]
cdef np.ndarray v = V[1:-1,1:-1]
cdef int r = 20
u[:] = 1.0
U[n//2-r:n//2+r, n//2-r:n//2+r] = 0.50
V[n//2-r:n//2+r, n//2-r:n//2+r] = 0.25
u += 0.15*np.random.random((n,n))
v += 0.15*np.random.random((n,n))
cdef np.ndarray Lu = np.zeros_like(u)
cdef np.ndarray Lv = np.zeros_like(v)
cdef int i, r, c, r1, c1, r2, c2
cdef double uvv
cdef double[:, ::1] bU = U
cdef double[:, ::1] bV = V
cdef double[:, ::1] bLu = Lu
cdef double[:, ::1] bLv = Lv
for i in range(counts):
for r in range(n):
r1 = r + 1
r2 = r + 2
for c in range(n):
c1 = c + 1
c2 = c + 2
bLu[r,c] = bU[r1,c2] + bU[r1,c] + bU[r2,c1] + bU[r,c1] - 4*bU[r1,c1]
bLv[r,c] = bV[r1,c2] + bV[r1,c] + bV[r2,c1] + bV[r,c1] - 4*bV[r1,c1]
for r in range(n):
r1 = r + 1
for c in range(n):
c1 = c + 1
uvv = bU[r1,c1]*bV[r1,c1]*bV[r1,c1]
bU[r1,c1] += Du*bLu[r,c] - uvv + F*(1 - bU[r1,c1])
bV[r1,c1] += Dv*bLv[r,c] + uvv - (F + k)*bV[r1,c1]
return V
%timeit GrayScott(40, 0.16, 0.08, 0.04, 0.06)
%timeit CythonGrayScott(40, 0.16, 0.08, 0.04, 0.06)
!python -m pythran.run -O3 -march=native grayscott.py -o pythran_grayscott.so
! python -m timeit -s 'from pythran_grayscott import GrayScott' 'GrayScott(40, 0.16, 0.08, 0.04, 0.06)'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fortran style
Step2: APL style
Step3: A preference?
Step4: We don't need your loops!
Step5: Scientists code in high-level Numpy!
|
7,263
|
<ASSISTANT_TASK:>
Python Code::
import numpy as np
from pyspark.ml.recommendation import ALS
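# Illustrative sketch only (none of this is from the original notebook): minimal usage of
# the ALS import above on a tiny, made-up ratings table. Assumes a local Spark session.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
ratings = spark.createDataFrame(
    [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 5.0)],
    ["userId", "movieId", "rating"])
als = ALS(userCol="userId", itemCol="movieId", ratingCol="rating",
          rank=5, maxIter=5, regParam=0.1, coldStartStrategy="drop")
model = als.fit(ratings)
model.transform(ratings).show()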
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,264
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'awi', 'sandbox-2', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
7,265
|
<ASSISTANT_TASK:>
Python Code:
# for testing if module is not in python-path
# import sys
# sys.path.append('/home/stephan/Repos/ENES-EUDAT/submission_forms')
# sys.path.append('C:\\Users\\Stephan Kindermann\\Documents\\GitHub\\submission_forms')
%load_ext autoreload
%autoreload 2
from IPython.display import display, Image
Image(filename='key-concepts.png')
from dkrz_forms import utils
prov_graph = utils.gen_prov_graph('/opt/jupyter/notebooks/form_directory/CORDEX/CORDEX_mm_mm.json','all')
#prov_graph = utils.gen_prov_graph('C:\\Users\\Stephan Kindermann\\Repos\\form_repo\\test\\test_testsuite_1234.json','all')
%matplotlib inline
prov_graph.plot()
# just for testing .. to be cleaned up ..
from dkrz_forms import form_handler
from dkrz_forms.config.project_config import PROJECT_DICT
test_dict = PROJECT_DICT['test']
from dkrz_forms.config.project_config import NAME_SPACES
# add namespaces for submission provenance capture
for key,value in NAME_SPACES.iteritems():
d1.add_namespace(key,value)
#d1.add_namespace()
# to do: look into some predefined vocabs, e.g. dublin core, iso19139,foaf etc.
d1.add_namespace("enes_entity",'http://www.enes.org/enes_entitiy#')
d1.add_namespace('enes_agent','http://www.enes.org/enes_agent#')
d1.add_namespace('data_collection','http://www.enes.org/enes_entity/file_collection')
d1.add_namespace('data_manager','http://www.enes.org/enes_agent/data_manager')
d1.add_namespace('data_provider','http://www.enes.org/enes_agent/data_provider')
d1.add_namespace('subm','http://www.enes.org/enes_entity/data_submsission')
d1.add_namespace('foaf','http://xmlns.com/foaf/0.1/')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model is along the concept described in https
Step2: Example name spaces
|
7,266
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
directory = '/input/'
with open(directory + 'reviews.txt', 'r') as f:
reviews = f.read()
with open(directory + 'labels.txt', 'r') as f:
labels = f.read()
reviews[:2000]
from string import punctuation
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
all_text[:2000]
words[:100]
from collections import Counter
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
reviews_ints = []
for each in reviews:
reviews_ints.append([vocab_to_int[word] for word in each.split()])
labels = labels.split('\n')
labels = np.array([1 if each == 'positive' else 0 for each in labels])
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
non_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) != 0]
len(non_zero_idx)
reviews_ints[-1]
reviews_ints = [reviews_ints[ii] for ii in non_zero_idx]
labels = np.array([labels[ii] for ii in non_zero_idx])
seq_len = 200
features = np.zeros((len(reviews_ints), seq_len), dtype=int)
for i, row in enumerate(reviews_ints):
features[i, -len(row):] = np.array(row)[:seq_len]
features[:10,:100]
split_frac = 0.8
split_idx = int(len(features)*0.8)
train_x, val_x = features[:split_idx], features[split_idx:]
train_y, val_y = labels[:split_idx], labels[split_idx:]
test_idx = int(len(val_x)*0.5)
val_x, test_x = val_x[:test_idx], val_x[test_idx:]
val_y, test_y = val_y[:test_idx], val_y[test_idx:]
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}".format(val_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
lstm_size = 256
lstm_layers = 1
batch_size = 100
learning_rate = 0.001
n_words = len(vocab_to_int) + 1  # +1 because word indices start at 1 and 0 is reserved for padding
tf.reset_default_graph()
# Create the graph object
graph = tf.Graph()
# Add nodes to the graph
with graph.as_default():
inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')
labels_ = tf.placeholder(tf.int32, [None, None], name='labels')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 50
with graph.as_default():
embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))
embed = tf.nn.embedding_lookup(embedding, inputs_)
with graph.as_default():
# Your basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
# Getting an initial state of all zeros
initial_state = cell.zero_state(batch_size, tf.float32)
with graph.as_default():
outputs, final_state = tf.nn.dynamic_rnn(cell, embed,
initial_state=initial_state)
with graph.as_default():
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
cost = tf.losses.mean_squared_error(labels_, predictions)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
with graph.as_default():
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def get_batches(x, y, batch_size=100):
n_batches = len(x)//batch_size
x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
for ii in range(0, len(x), batch_size):
yield x[ii:ii+batch_size], y[ii:ii+batch_size]
epochs = 10
with graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
iteration = 1
for e in range(epochs):
state = sess.run(initial_state)
for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 0.5,
initial_state: state}
loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
if iteration%5==0:
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Train loss: {:.3f}".format(loss))
if iteration%25==0:
val_acc = []
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
for x, y in get_batches(val_x, val_y, batch_size):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: val_state}
batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
val_acc.append(batch_acc)
print("Val acc: {:.3f}".format(np.mean(val_acc)))
iteration +=1
saver.save(sess, "checkpoints/sentiment.ckpt")
test_acc = []
with tf.Session(graph=graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
test_state = sess.run(cell.zero_state(batch_size, tf.float32))
for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: test_state}
batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
test_acc.append(batch_acc)
print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preprocessing
Step2: Encoding the words
Step3: Encoding the labels
Step4: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 words.
Step5: Turns out it's the final review that has zero length. But that might not always be the case, so let's make it more general.
Step6: Exercise
Step7: Training, Validation, Test
Step8: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like
Step9: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.
Step10: Embedding
Step11: LSTM cell
Step12: RNN forward pass
Step13: Output
Step14: Validation accuracy
Step15: Batching
Step16: Training
Step17: Testing
|
7,267
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
from exact_solvers import euler
from exact_solvers import euler_demos
from ipywidgets import widgets
from ipywidgets import interact
State = euler.Primitive_State
gamma = 1.4
interact(euler.plot_integral_curves,
gamma=widgets.FloatSlider(min=1.1,max=3,value=1.4),
rho_0=widgets.FloatSlider(min=0.1,max=3.,value=1.,
description=r'$\rho_0$'));
interact(euler.plot_hugoniot_loci,
gamma=widgets.FloatSlider(min=1.1,max=3,value=1.4),
rho_0=widgets.FloatSlider(min=0.1,max=3.,value=1.,
description=r'$\rho_0$'));
left_state = State(Density = 3.,
Velocity = 0.,
Pressure = 3.)
right_state = State(Density = 1.,
Velocity = 0.,
Pressure = 1.)
euler.riemann_solution(left_state,right_state)
euler.phase_plane_plot(left_state, right_state)
left_state = State(Density = 1.,
Velocity = -3.,
Pressure = 1.)
right_state = State(Density = 1.,
Velocity = 3.,
Pressure = 1.)
euler.riemann_solution(left_state,right_state);
euler.phase_plane_plot(left_state, right_state)
left_state = State(Density = 1.,
Velocity = 3.,
Pressure = 1.)
right_state = State(Density = 1.,
Velocity = -3.,
Pressure = 1.)
euler.riemann_solution(left_state,right_state)
euler.phase_plane_plot(left_state, right_state)
left_state = State(Density = 3.,
Velocity = 0.,
Pressure = 3.)
right_state = State(Density = 1.,
Velocity = 0.,
Pressure = 1.)
euler.plot_riemann_trajectories(left_state, right_state)
def plot_with_stripes_t_slider(t):
euler_demos.plot_with_stripes(rho_l=3.,u_l=0.,p_l=3.,
rho_r=1.,u_r=0.,p_r=1.,
gamma=gamma,t=t)
interact(plot_with_stripes_t_slider,
t=widgets.FloatSlider(min=0.,max=1.,step=0.1,value=0.5));
euler_demos.euler_demo1(rho_l=2.,u_l=0.,p_l=2.5,
rho_r=3.,u_r=0.,p_r=5., gamma=gamma)
left_state = State(Density =0.,
Velocity = 0.,
Pressure = 0.)
right_state = State(Density = 1.,
Velocity = -3.,
Pressure = 1.)
euler.riemann_solution(left_state,right_state)
euler.phase_plane_plot(left_state, right_state)
left_state = State(Density =1.,
Velocity = -10.,
Pressure = 1.)
right_state = State(Density = 1.,
Velocity = 10.,
Pressure = 1.)
euler.riemann_solution(left_state,right_state)
euler.phase_plane_plot(left_state, right_state)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you wish to examine the Python code for this chapter, see
Step2: Rankine-Hugoniot jump conditions
Step3: Entropy condition
Step4: Here is a plot of the solution in the phase plane, showing the integral curve connecting the left and middle states, and the Hugoniot locus connecting the middle and right states.
Step5: Problem 2
Step6: Problem 3
Step7: Plot particle trajectories
Step8: Since the distance between particles in the above plot is inversely proportional to density, we see that the density around a particle increases as it goes through the shock wave but decreases through the rarefaction wave, and that in general there is a jump in density across the contact discontinuity, which lies along the particle trajectory emanating from $x=0$ at $t=0$.
Step9: Note the following in the figure above
Step10: Riemann problems with vacuum
Step11: The phase plane plot may look odd, but recall that in the vacuum state velocity is undefined, and since $p_\ell = p_m = 0$, the left and middle states are actually the same.
|
7,268
|
<ASSISTANT_TASK:>
Python Code:
primes = []
i = 2
while len(primes) < 25:
for p in primes:
if i % p == 0:
break
else:
primes.append(i)
i += 1
print(primes)
def square(val):
print(val)
return val ** 2
squared_numbers = [square(i) for i in range(5)]
print('Squared from list:')
print(squared_numbers)
squared_numbers = (square(i) for i in range(5))
print('Squared from iterable:')
print(squared_numbers)
def squared_numbers(num):
for i in range(num):
yield i ** 2
print('This is only printed after all the numbers output have been consumed')
print(squared_numbers(5))
for i in squared_numbers(5):
print(i)
import functools
def plus(val, n):
return val + n
f = functools.partial(plus, 5)
f(5)
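# Illustrative aside: partial also accepts keyword arguments, which can read more clearly.
plus_five = functools.partial(plus, n=5)
print(plus_five(10))  # 15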
def decorator(inner):
def inner_decorator():
print('before')
inner()
print('after')
return inner_decorator
def decorated():
print('decorated')
f = decorator(decorated)
f()
@decorator
def decorated():
print('decorated')
decorated()
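# Illustrative aside (not from the original lesson): functools.wraps preserves the
# wrapped function's name and docstring, which the plain decorator above discards.
def wrapped_decorator(inner):
    @functools.wraps(inner)
    def inner_decorator():
        print('before')
        inner()
        print('after')
    return inner_decorator

@wrapped_decorator
def documented():
    """A decorated function that keeps its metadata."""
    print('documented')

print(documented.__name__)  # 'documented', not 'inner_decorator'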
import time
@functools.lru_cache()
def slow_compute(n):
time.sleep(1)
print(n)
start = time.time()
slow_compute(1)
print(time.time() - start)
start = time.time()
slow_compute(1)
print(time.time() - start)
start = time.time()
slow_compute(2)
print(time.time() - start)
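# Illustrative aside: lru_cache exposes hit/miss statistics, useful to confirm caching works.
print(slow_compute.cache_info())  # two misses (1 and 2) and one hit (the repeated 1)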
"""
DISCUSSED IN CLASS
Classes:
data
style
source
axes
grid
legend
points
lines
- draw
- position
- color
- size / thickness
- symbol
- slope / intercept
- text representation
figure
plot
title
labels
text
"""
"""
DISCUSSED IN CLASS
classes:
appetizers
main courses
desserts
recipe
__Attributes/Methods__
- preparation time
- cooking time
- serving portion
- servings
- name
- nutritional information
- ingredients
- methods
- scaling
- equipment
- special occasion
- instructions
__Interactions__
- Cookbook
- Ingredients
- Instructions
- Nutrition
- Equipment
- Scaling
- Portions
servings
ingredients
- quantity (mass/volume)
- units
- nutritional info
- specific dietary info
- cost
- storage temperature
- sustitute options
- name
- translations
- alternative names
- special handling instructions
- sources
* can interact with the following classes:
* recipe
* Instructions
* Equipment
* nutrition
* scaling
* portions
* cookbook
* units
solid ingredients
liquid ingredients
equipment
instructions
nutritional info
style
units conversion
scaling
portions
cookbook
(recipe class can have an attribute, list of ingredients which are instances of another class themselves)
(classes interact with each other often in an app like this)
(instructions can be a class, and an instance of it can be saved as an attribute in Recipe class)
"""
class Person(object):
"""A class definition for a person. The following attributes are supported:
Attributes:
name: A string representing the person's name.
age: An integer representing the person's age.
"""
mammal = True
def __init__(self, name, age):
"""Return a Person object with name and age set to the values supplied"""
self.name = name
self.age = age
person1 = Person('Alice', 25)
person2 = Person('Bob', 30)
print(person1, person2)
class Person(object):
"""A class definition for a person. The following attributes are supported:
Attributes:
name: A string representing the person's name.
age: An integer representing the person's age.
"""
mammal = True
def __init__(self, name, age):
"""Return a Person object with name and age set to the values supplied"""
self.name = name
self.age = age
def __str__(self):
return '{0} who is {1} years old.'.format(self.name, self.age)
person1 = Person('Alice', 25)
person2 = Person('Bob', 30)
print(person1, person2)
class Person(object):
"""A class definition for a person. The following attributes are supported:
Attributes:
name: A string representing the person's name.
age: An integer representing the person's age.
"""
friends = []
def __init__(self, name, age):
"""Return a Person object with name and age set to the values supplied"""
self.name = name
self.age = age
def __str__(self):
return '{0} who is {1} years old'.format(self.name, self.age)
person1 = Person('Alice', 25)
person2 = Person('Bob', 30)
person1.friends.append('Charlie')
person2.friends.append('Danielle')
print(person1.friends, person2.friends)
class Person(object):
"""A class definition for a person. The following attributes are supported:
Attributes:
name: A string representing the person's name.
age: An integer representing the person's age.
"""
def __init__(self, name, age):
"""Return a Person object with name and age set to the values supplied"""
self.name = name
self.age = age
self.friends = []
def __str__(self):
return '{0} who is {1} years old'.format(self.name, self.age)
person1 = Person('Alice', 25)
person2 = Person('Bob', 30)
person1.friends.append('Charlie')
person2.friends.append('Danielle')
print(person1.friends, person2.friends)
print('This works:', person1.friends)
print('This does not work:', friends)
class Person(object):
"""A class definition for a person. The following attributes are supported:
Attributes:
name: A string representing the person's name.
age: An integer representing the person's age.
"""
def __init__(self, name, age):
"""Return a Person object with name and age set to the values supplied"""
self.name = name
self.age = age
self.friends = []
def __str__(self):
"""Return a string representation of the object"""
return '{0} who is {1} years old'.format(self.name, self.age)
def add_friend(self, friend):
"""Add a friend"""
self.friends.append(friend)
person1 = Person('Alice', 25)
person2 = Person('Bob', 30)
person1.add_friend('Charlie')
person2.add_friend('Danielle')
print(person1.friends, person2.friends)
class Test(object):
def do_something(self):
pass
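# Illustrative aside (not from the original lesson): Python has no enforced private
# attributes; a leading underscore is only a convention, and a double underscore
# merely triggers name mangling.
class Account(object):
    def __init__(self, balance):
        self._balance = balance      # "private" by convention
        self.__pin = '1234'          # name-mangled to _Account__pin

account = Account(100)
print(account._balance)              # still accessible, just discouraged
print(account._Account__pin)         # the mangled name can still be reached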
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Functional
Step2: Object oriented
Step3: Exercise 2
Step6: Building Skills in Object Oriented Design is a good resource to learn more about this process.
Step9: There is a lot happening above.
Step13: There are many more special methods.
Step16: Both of our objects point to the same instance of the list type so adding a new friend to either object shows up in both.
Step17: Objects have their own namespace, although we have created variables called name, age, and friends they can only be accessed in the context of the object.
Step22: We are not limited to special methods when creating classes. Standard functions, or in this context methods, are an integral part of object oriented programming. Their definition is identical to special methods and functions outside of classes.
Step23: Private vs Public
|
7,269
|
<ASSISTANT_TASK:>
Python Code:
from dolfin import *
from rbnics import *
@EIM("online")
@ExactParametrizedFunctions("offline")
class NonlinearElliptic(NonlinearEllipticProblem):
# Default initialization of members
def __init__(self, V, **kwargs):
# Call the standard initialization
NonlinearEllipticProblem.__init__(self, V, **kwargs)
# ... and also store FEniCS data structures for assembly
assert "subdomains" in kwargs
assert "boundaries" in kwargs
self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"]
self.du = TrialFunction(V)
self.u = self._solution
self.v = TestFunction(V)
self.dx = Measure("dx")(subdomain_data=self.subdomains)
self.ds = Measure("ds")(subdomain_data=self.boundaries)
# Store the forcing term expression
self.f = Expression("sin(2*pi*x[0])*sin(2*pi*x[1])", element=self.V.ufl_element())
# Customize nonlinear solver parameters
self._nonlinear_solver_parameters.update({
"linear_solver": "mumps",
"maximum_iterations": 20,
"report": True
})
# Return custom problem name
def name(self):
return "NonlinearEllipticEIM"
# Return theta multiplicative terms of the affine expansion of the problem.
@compute_theta_for_derivatives
def compute_theta(self, term):
mu = self.mu
if term == "a":
theta_a0 = 1.
return (theta_a0,)
elif term == "c":
theta_c0 = mu[0]
return (theta_c0,)
elif term == "f":
theta_f0 = 100.
return (theta_f0,)
elif term == "s":
theta_s0 = 1.0
return (theta_s0,)
else:
raise ValueError("Invalid term for compute_theta().")
# Return forms resulting from the discretization of the affine expansion of the problem operators.
def assemble_operator(self, term):
v = self.v
dx = self.dx
if term == "a":
du = self.du
a0 = inner(grad(du), grad(v)) * dx
return (a0,)
elif term == "c":
u = self.u
mu = self.mu
c0 = (exp(mu[1] * u) - 1) / mu[1] * v * dx
return (c0,)
elif term == "dc": # preferred over derivative() computation which does not cancel out trivial mu[1] factors
du = self.du
u = self.u
mu = self.mu
dc0 = exp(mu[1] * u) * du * v * dx
return (dc0,)
elif term == "f":
f = self.f
f0 = f * v * dx
return (f0,)
elif term == "s":
s0 = v * dx
return (s0,)
elif term == "dirichlet_bc":
bc0 = [DirichletBC(self.V, Constant(0.0), self.boundaries, 1)]
return (bc0,)
elif term == "inner_product":
du = self.du
x0 = inner(grad(du), grad(v)) * dx
return (x0,)
else:
raise ValueError("Invalid term for assemble_operator().")
# Customize the resulting reduced problem
@CustomizeReducedProblemFor(NonlinearEllipticProblem)
def CustomizeReducedNonlinearElliptic(ReducedNonlinearElliptic_Base):
class ReducedNonlinearElliptic(ReducedNonlinearElliptic_Base):
def __init__(self, truth_problem, **kwargs):
ReducedNonlinearElliptic_Base.__init__(self, truth_problem, **kwargs)
self._nonlinear_solver_parameters.update({
"report": True,
"line_search": "wolfe"
})
return ReducedNonlinearElliptic
mesh = Mesh("data/square.xml")
subdomains = MeshFunction("size_t", mesh, "data/square_physical_region.xml")
boundaries = MeshFunction("size_t", mesh, "data/square_facet_region.xml")
V = FunctionSpace(mesh, "Lagrange", 1)
problem = NonlinearElliptic(V, subdomains=subdomains, boundaries=boundaries)
mu_range = [(0.01, 10.0), (0.01, 10.0)]
problem.set_mu_range(mu_range)
reduction_method = PODGalerkin(problem)
reduction_method.set_Nmax(20, EIM=21)
reduction_method.set_tolerance(1e-8, EIM=1e-4)
reduction_method.initialize_training_set(50, EIM=60)
reduced_problem = reduction_method.offline()
online_mu = (0.3, 9.0)
reduced_problem.set_mu(online_mu)
reduced_solution = reduced_problem.solve()
plot(reduced_solution, reduced_problem=reduced_problem)
reduction_method.initialize_testing_set(50, EIM=60)
reduction_method.error_analysis()
reduction_method.speedup_analysis()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Affine Decomposition
Step2: 4. Main program
Step3: 4.2. Create Finite Element space (Lagrange P1)
Step4: 4.3. Allocate an object of the NonlinearElliptic class
Step5: 4.4. Prepare reduction with a POD-Galerkin method
Step6: 4.5. Perform the offline phase
Step7: 4.6. Perform an online solve
Step8: 4.7. Perform an error analysis
Step9: 4.8. Perform a speedup analysis
|
7,270
|
<ASSISTANT_TASK:>
Python Code:
from fastai.tabular import *
path = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(path/'adult.csv')
dep_var = 'salary'
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
cont_names = ['age', 'fnlwgt', 'education-num']
procs = [FillMissing, Categorify, Normalize]
test = TabularList.from_df(df.iloc[800:1000].copy(), path=path, cat_names=cat_names, cont_names=cont_names)
data = (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(list(range(800,1000)))
.label_from_df(cols=dep_var)
.add_test(test)
.databunch())
data.show_batch(rows=10)
learn = tabular_learner(data, layers=[200,100], metrics=accuracy)
learn.fit(1, 1e-2)
row = df.iloc[0]
learn.predict(row)
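# learn.predict on a single row returns a (predicted Category, class index tensor, probabilities tensor)
# triple, here the predicted salary class for the first row of the DataFrame.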
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tabular data should be in a Pandas DataFrame.
Step2: Inference (prediction)
|
7,271
|
<ASSISTANT_TASK:>
Python Code:
def isProduct(arr, n, x):
    # Return True if x can be written as the product of two elements of arr.
    if n < 2:
        return False
    s = set()
    for i in range(0, n):
        if arr[i] == 0:
            if x == 0:
                return True
            else:
                continue
        if x % arr[i] == 0:
            if x // arr[i] in s:
                return True
        s.add(arr[i])
    return False

if __name__ == "__main__":
    arr = [10, 20, 9, 40]
    x = 400
    n = len(arr)
    if isProduct(arr, n, x):
        print("Yes")
    else:
        print("No")
    x = 190
    if isProduct(arr, n, x):
        print("Yes")
    else:
        print("No")
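
# Quick checks mirroring the runs above: 400 = 10 * 40 is a product of two array elements, 190 is not.
assert isProduct([10, 20, 9, 40], 4, 400)
assert not isProduct([10, 20, 9, 40], 4, 190)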
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,272
|
<ASSISTANT_TASK:>
Python Code:
import copy
import glob
import os
import subprocess
import cdpybio as cpb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import pybedtools as pbt
import seaborn as sns
import socket
import statsmodels.stats.multitest as smm
import vcf as pyvcf
import cardipspy as cpy
import ciepy
%matplotlib inline
%load_ext rpy2.ipython
dy_name = 'eqtl_analysis'
import socket
if socket.gethostname() == 'fl-hn1' or socket.gethostname() == 'fl-hn2':
dy = os.path.join(ciepy.root, 'sandbox', dy_name)
cpy.makedir(dy)
pbt.set_tempdir(dy)
outdir = os.path.join(ciepy.root, 'output', dy_name)
cpy.makedir(outdir)
private_outdir = os.path.join(ciepy.root, 'private_output', dy_name)
cpy.makedir(private_outdir)
transcript_to_gene = pd.read_table(cpy.gencode_transcript_gene, header=None,
squeeze=True, index_col=0)
gene_info = pd.read_table(cpy.gencode_gene_info, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'eqtl_input',
'tpm_log_filtered_phe_std_norm_peer_resid.tsv')
exp = pd.read_table(fn, index_col=0)
dy = os.path.join(ciepy.root, 'output/eqtl_processing/eqtls01')
fn = os.path.join(dy, 'qvalues.tsv')
qvalues = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'lead_variants.tsv')
lead_vars = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'lead_variants_single.tsv')
lead_vars_single = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'gene_variant_pairs.tsv')
gene_variant = pd.read_table(fn, index_col=0)
dy = os.path.join(ciepy.root, 'output/eqtl_processing/eqtls01')
h2 = pd.read_table(os.path.join(dy, 'h2.tsv'), index_col=0, squeeze=True, header=None)
dy = os.path.join(ciepy.root, 'output/eqtl_processing/no_peer01')
#h2_no_peer = pd.read_table(os.path.join(dy, 'h2.tsv'), index_col=0, squeeze=True, header=None)
gold_eqtls = pd.read_table(
os.path.join(ciepy.root, 'output', 'eqtl_methods_exploration', 'gold_eqtls.tsv'),
index_col=0)
fn = os.path.join(ciepy.root, 'output/eqtl_processing/eqtls02', 'lead_variants.tsv')
lead_vars_second = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output/eqtl_processing/eqtls03', 'lead_variants.tsv')
lead_vars_third = pd.read_table(fn, index_col=0)
print('We detected eQTLs for {:,.0f} of {:,} genes tested.'.format(qvalues.perm_sig.sum(),
qvalues.shape[0]))
for i in range(1, 4):
fn = os.path.join(ciepy.root, 'output/eqtl_processing/eqtls0{}/qvalues.tsv'.format(i))
tdf = pd.read_table(fn, index_col=0)
print('{:,.0f} of {:,} significant genes have eQTLs for analysis {}.'.format(
tdf.perm_sig.sum(), tdf.shape[0], i))
print('Number of genes with variant type as lead variant (ties allowed):')
s = set(lead_vars[lead_vars.perm_sig].gene_id + ':' + lead_vars[lead_vars.perm_sig].variant_type)
pd.Series([x.split(':')[1] for x in s]).value_counts()
761 + 615
print('Number of genes with lead variant from each caller (ties allowed):')
s = set(lead_vars[lead_vars.perm_sig].gene_id + ':' + lead_vars[lead_vars.perm_sig].variant_caller)
pd.Series([x.split(':')[1] for x in s]).value_counts()
n = sum(gene_variant.marker_id.apply(lambda x: 'CNV' in x))
print('Total number of CNV eQTLs: {:,}'.format(n))
sig = lead_vars[lead_vars.perm_sig]
sig_single = lead_vars[lead_vars.perm_sig]
a = gene_variant.shape[0]
b = len(set(gene_variant.location))
print('{:,} total variant-expression associations comprising {:,}'
' unique variants.'.format(a, b))
fig, axs = plt.subplots(2, 2, figsize=(10, 10))
axs = axs.flatten()
for i, g in enumerate(list(set(sig.gene_id) & set(gold_eqtls.index))[0:4]):
ax = axs[i]
fn = os.path.join(ciepy.root, 'private_output', 'run_eqtl_analysis', 'eqtls01',
'gene_results', g, '{}.tsv'.format(g))
min_fn = os.path.join(ciepy.root, 'private_output', 'run_eqtl_analysis', 'eqtls01',
'gene_results', g, 'minimum_pvalues.tsv')
if os.path.exists(min_fn):
res = ciepy.read_emmax_output(fn)
#t = res[res.MARKER_ID.apply(lambda x: gold_eqtls.ix[g, 'rsid'] in x)]
t = res[res.BEG == gold_eqtls.ix[g, 'start']]
min_pvals = pd.read_table(min_fn, header=None, squeeze=True)
ax = (-np.log10(min_pvals)).hist(ax=ax)
ax.set_ylabel('Number of $p$-values')
ax.set_xlabel('$-\log_{10}$ $p$-value')
ya, yb = ax.get_ylim()
ax.vlines(-np.log10(t.PVALUE), ya, yb, label='GTEx lead variant', alpha=0.5, linestyle='--')
ax.vlines(-np.log10(res.PVALUE.min()), ya, yb, colors=['red'], label='min $p$-value', alpha=0.5)
ax.set_title('{}'.format(gene_info.ix[g, 'gene_name']));
lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fig.tight_layout()
fig.savefig(os.path.join(outdir, 'gtex_comparison.pdf'),
bbox_extra_artists=(lgd,), bbox_inches='tight')
n,b,p = plt.hist(sig.tss_dist,
bins=np.arange(-500000, 510000, 10000),
normed=True, histtype='stepfilled',
linewidth=0)
plt.title('Distance from nearest TSS')
plt.ylabel('Fraction of eQTLs')
plt.xlabel('Distance in base pairs');
n,b,p = plt.hist(sig[sig.variant_type == 'ins'].tss_dist,
bins=np.arange(-1000000, 1010000, 10000),
normed=True, histtype='stepfilled', linewidth=0)
plt.title('Distance from nearest TSS')
plt.ylabel('Fraction of eQTLs')
plt.xlabel('Distance in base pairs');
n,b,p = plt.hist(sig[sig.variant_type == 'del'].tss_dist,
bins=np.arange(-1000000, 1010000, 10000),
normed=True, histtype='stepfilled', linewidth=0)
plt.title('Distance from nearest TSS')
plt.ylabel('Fraction of eQTLs')
plt.xlabel('Distance in base pairs');
n,b,p = plt.hist(sig[sig.variant_type == 'cnv'].tss_dist,
bins=np.arange(-1000000, 1010000, 10000),
normed=True, histtype='stepfilled', linewidth=0)
plt.title('Distance from nearest TSS')
plt.ylabel('Fraction of eQTLs')
plt.xlabel('Distance in base pairs');
ax = sns.jointplot(sig_single.tss_dist / 1000,
-np.log10(sig_single.pvalue),
stat_func=None, alpha=0.25)
ax.set_axis_labels(xlabel='Distance in kb',
ylabel='$-\log_{10}$ $p$-value')
plt.tight_layout()
plt.savefig(os.path.join(outdir, 'sig_pvalue_tss_dist.pdf'))
ax = sns.jointplot(sig_single.tss_dist / 1000,
sig_single.beta.abs(),
stat_func=None, alpha=0.25)
ax.set_axis_labels(xlabel='Distance in kb', ylabel='abs$(\\beta)$')
plt.tight_layout()
plt.savefig(os.path.join(outdir, 'sig_beta_tss_dist.pdf'))
dists = sig_single.tss_dist
a = dists[dists >= 0]
b = -dists[dists < 0]
r = plt.hist(list(np.log10(a + 1)) + list(-np.log10(b + 1)),
bins=100)
plt.title('Distance from nearest TSS')
plt.ylabel('Number of eQTLs')
plt.xlabel('Distance in $\log_{10}$ base pairs')
ya, yb = plt.ylim()
plt.vlines(np.log10(50000), ya, yb, linestyles='--')
plt.vlines(-np.log10(50000), ya, yb, linestyles='--')
plt.vlines(np.log10(3500), ya, yb, linestyles='--', color='red')
plt.vlines(-np.log10(3500), ya, yb, linestyles='--', color='red')
plt.tight_layout()
plt.savefig(os.path.join(outdir, 'sig_snv_tss_log_dist.pdf'))
t = pd.DataFrame(0, index=set(gene_info.gene_type), columns=['sig', 'not_sig'])
vc = gene_info.ix[qvalues[qvalues.perm_sig].index, 'gene_type'].value_counts()
t.ix[vc.index, 'sig'] = vc
vc = gene_info.ix[qvalues[qvalues.perm_sig == False].index, 'gene_type'].value_counts()
t.ix[vc.index, 'not_sig'] = vc
t = t[t.sum(axis=1) > 1]
t['total'] = t.sum(axis=1)
#t.sort_values(by='total', inplace=True, ascending=False)
t.sort_values(by='total', inplace=True)
t.drop('protein_coding')[['sig', 'not_sig']].plot(kind='barh')
plt.xlabel('Number of genes');
fn = os.path.join(ciepy.root, 'private_output', 'eqtl_input',
'filtered_all', '0000.vcf.gz')
vcf_reader = pyvcf.Reader(open(fn), compressed=True)
res_fns = glob.glob(os.path.join(ciepy.root, 'private_output', 'run_eqtl_analysis', 'eqtls01',
'gene_results', '*', 'ENS*.tsv'))
res_fns = pd.Series(res_fns,
index=[os.path.splitext(os.path.split(x)[1])[0] for x in res_fns])
qvalue_sig = qvalues[qvalues.perm_sig == 1]
qvalue_sig = qvalue_sig.sort_values('perm_qvalue')
def eqtl_summary(gene_id, left=None, right=None, fn_root=None):
res = ciepy.read_emmax_output(res_fns[gene_id])
res = res.sort_values('PVALUE')
ind = sig_single[sig_single.gene_id == gene_id].index[0]
if gene_info.ix[gene_id, 'strand'] == '+':
gene_left = gene_info.ix[gene_id, 'start']
gene_right = gene_info.ix[gene_id, 'end']
else:
gene_right = gene_info.ix[gene_id, 'start']
gene_left = gene_info.ix[gene_id, 'end']
if left:
res = res[res.BEG >= gene_left - left]
if right:
res = res[res.BEG <= gene_right + right]
plt.figure()
plt.scatter(res.BEG, -np.log10(res.PVALUE), color='grey', alpha=0.5)
plt.xlim(res.BEG.min(), res.BEG.max())
ya = 0
yb = (-np.log10(res.PVALUE)).max() + 1
plt.ylim(ya, yb)
# plt.vlines(most_sig_single.ix[g, 'tss_start'], ya, yb, color='blue',
# linestyles='--', alpha=0.5)
poly = plt.Polygon([[gene_left, 0.9 * yb], [gene_left, yb], [gene_right, 0.95 * yb]],
closed=True, facecolor='blue', edgecolor='none')
plt.gca().add_patch(poly)
#most_sig_single.ix[g, 'tss_start']
plt.xlabel('Genomic position')
plt.ylabel('$-\log_{10}$ $p$-value')
plt.title('Manhattan plot for {}'.format(gene_info.ix[gene_id, 'gene_name']));
if fn_root:
plt.tight_layout()
plt.savefig('{}_manhattan.pdf'.format(fn_root))
t = vcf_reader.fetch(res.CHROM.values[0],
res.BEG.values[0],
res.BEG.values[0] + 1)
r = t.next()
tdf = pd.DataFrame(exp.ix[gene_id])
tdf.columns = ['expression']
tdf['genotype'] = 0
hets = set(exp.columns) & set([s.sample for s in r.get_hets()])
tdf.ix[hets, 'genotype'] = 1
alts = set(exp.columns) & set([s.sample for s in r.get_hom_alts()])
tdf.ix[alts, 'genotype'] = 2
plt.figure()
#sns.lmplot(x='genotype', y='expression', data=tdf)
ax = sns.violinplot(x='genotype', y='expression', data=tdf, color='grey',
order=[0, 1, 2], scale='count')
sns.regplot(x='genotype', y='expression', data=tdf, scatter=False, color='red')
ya, yb = plt.ylim()
plt.text(0, yb, 'n={}'.format(sum(tdf.genotype == 0)),
verticalalignment='top', horizontalalignment='center')
plt.text(1, yb, 'n={}'.format(sum(tdf.genotype == 1)),
verticalalignment='top', horizontalalignment='center')
plt.text(2, yb, 'n={}'.format(sum(tdf.genotype == 2)),
verticalalignment='top', horizontalalignment='center')
plt.title('Expression vs. genotype for {}'.format(gene_info.ix[gene_id, 'gene_name']));
if fn_root:
plt.tight_layout()
plt.savefig('{}_violin.pdf'.format(fn_root))
eqtl_summary(sig.gene_id[0])
eqtl_summary(sig.gene_id[5])
eqtl_summary('ENSG00000100897.13')
eqtl_summary('ENSG00000173992.4')
fn = os.path.join(ciepy.root, 'misc', 'stem_cell_population_maintenance.tsv')
a = pd.read_table(fn, header=None)
fn = os.path.join(ciepy.root, 'misc', 'regulation_of_stem_cell_population_maintenance.tsv')
b = pd.read_table(fn, header=None)
go_genes = set(a[2]) | set(b[2])
go_genes = go_genes & set(gene_info.gene_name)
n = len(set(a[2]) & set(gene_info.ix[qvalues[qvalues.perm_sig].index, 'gene_name']))
print('{} eGenes of {} from stem cell population maintenance.'.format(n, a.shape[0]))
n = len(set(b[2]) & set(gene_info.ix[qvalues[qvalues.perm_sig].index, 'gene_name']))
print('{} eGenes of {} from regulation of stem cell population maintenance.'.format(n, b.shape[0]))
n = len(set(a[2]) & set(b[2]) & set(gene_info.ix[qvalues[qvalues.perm_sig].index, 'gene_name']))
print('{} eGenes shared.'.format(n))
pgenes = ['LIN28A', 'POU5F1', 'SOX2', 'NANOG', 'MYC', 'KLF4', 'ZFP42']
for g in pgenes:
i = gene_info[gene_info.gene_name == g].index[0]
if i in sig.gene_id.values:
eqtl_summary(i, fn_root=os.path.join(outdir, '{}_summary'.format(g)))
# Markers of pluripotency from http://www.nature.com/nbt/journal/v33/n11/full/nbt.3387.html.
pgenes = ['CXCL5', 'IDO1', 'LCK', 'TRIM22', 'DNMT3B', 'HESX1', 'SOX2', 'POU5F1', 'NANOG']
for g in pgenes:
i = gene_info[gene_info.gene_name == g].index[0]
if i in sig.gene_id.values:
eqtl_summary(i, fn_root=os.path.join(outdir, '{}_summary'.format(g)))
genes = [u'BRIX1 ', u'CD9', u'COMMD3 ', u'CRABP2 ', u'CXCL5', u'DIAPH2',
u'DNMT3B', u'EDNRB ', u'FGF4 ', u'FGF5 ', u'FOXD3 ', u'GABRB3 ',
u'GAL ', u'GBX2', u'GDF3 ', u'GRB7 ', u'HCK ', u'HESX1', u'IDO1',
u'IFITM1 ', u'IFITM2', u'IGF2BP2', u'IL6ST ', u'KIT ', u'LCK', u'LIFR ',
u'LIN28A', u'NANOG', u'NODAL ', u'NOG', u'NR5A2', u'NR6A1', u'PODXL',
u'POU5F1', u'PTEN ', u'SEMA3A', u'SFRP2 ', u'SOX2', u'TDGF1', u'TERT',
u'TFCP2L1', u'TRIM22', u'UTF1', u'ZFP42']
for g in genes:
if g in gene_info.gene_name.values:
i = gene_info[gene_info.gene_name == g].index[0]
if i in sig.gene_id:
eqtl_summary(i)#, fn_root=os.path.join(outdir, '{}_summary'.format(g)))
pgenes = ['LIN28A', 'POU5F1', 'SOX2', 'NANOG', 'MYC', 'KLF4', 'ZFP42']
t = set(lead_vars_second.ix[lead_vars_second.perm_sig, 'gene_name'])
for g in pgenes:
if g in t:
print(g)
def ld_vs_pval(gene_id):
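    # Plot each nearby variant's LD (r^2 with the lead SNV, taken from the 1KGP EUR panel)
    # against its -log10 eQTL p-value; the dashed red line marks the lead variant's p-value.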
ind = lead_vars_single[lead_vars_single.gene_id == gene_id].index[0]
fn = '/publicdata/1KGP_20151103/LD/tabix/{}_EUR_ld_all.hap.ld.bed.gz'.format(sig_single.ix[ind, 'chrom'])
c = 'tabix {} {}:{}-{}'.format(fn, lead_vars_single.ix[ind].chrom,
sig_single.ix[ind].start - 1, sig_single.ix[ind].end)
res = subprocess.check_output(c, shell=True)
tdf = pd.DataFrame([x.split() for x in res.strip().split('\n')],
columns=['chrom', 'start', 'end', 'ld_info'])
se = pd.Series(tdf['ld_info'].apply(lambda x: x.split(':')).apply(lambda x: x[2]).values,
index=tdf['ld_info'].apply(lambda x: x.split(':')).apply(lambda x: x[1])).astype(float)
res = ciepy.read_emmax_output(res_fns[gene_id])
res.index = res.BEG.astype(str)
plt.scatter(se.values, -np.log10(res.ix[se.index, 'PVALUE'].values),
color='grey', alpha=0.5, s=100)
xa, xb = plt.xlim()
plt.hlines(-np.log10(lead_vars_single.ix[ind, 'pvalue']), xa, xb,
color='red', linestyle='--')
plt.xlim(xa, xb)
plt.ylabel('$-\log_{10}$ $p$-value')
plt.xlabel('LD with most significant SNV');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Summary
Step2: Comparison to GTEx Multi-Tissue eQTLs
Step3: The black line shows the $p$-value for the GTEx SNV. The red line shows the smallest $p$-value that
Step4: The above two plots are the same data at different $x$-axis limits. The first plot
Step5: Grubert et al. defined distal as greater than 50 kb away (outside of the black lines above).
Step6: Manhattan and Expression Plots
Step7: Pluripotency Genes
Step8: POU5F1 has a second eQTL as well.
|
7,273
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import PyDealII.Debug as dealii
triangulation = dealii.Triangulation('2D')
triangulation.generate_hyper_cube()
triangulation.refine_global(2)
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import numpy as np
def plot_triangulation(triangulation, color_scheme):
fig, ax = plt.subplots()
patches = []
colors = []
cell_id = 0
for cell in triangulation.active_cells():
quad_vertices = np.zeros((4,2))
# The shift variable is used to reorder the vertices because
# deal.II and matplotlib require different ordering
shift = [0,1,3,2]
for i in range(4):
vertex = cell.get_vertex(i)
quad_vertices[shift[i]][0] = vertex.x
quad_vertices[shift[i]][1] = vertex.y
quad = Polygon(quad_vertices, closed=True)
patches.append(quad)
colors.append(color_scheme(cell_id, cell))
cell_id += 1
p = PatchCollection(patches)
p.set_array(np.array(colors))
ax.add_collection(p, autolim=True)
ax.autoscale_view()
plt.show()
def color_sc(cell_id, cell):
return cell_id
plot_triangulation(triangulation, color_sc)
for cell in triangulation.active_cells():
if cell.barycenter().x < 0.5:
cell.material_id = 0
else:
cell.material_id = 1
plot_triangulation(triangulation, lambda cell_id,cell : cell.material_id)
for cell in triangulation.active_cells():
if cell.material_id == 0:
cell.refine_flag ='isotropic'
triangulation.execute_coarsening_and_refinement()
plot_triangulation(triangulation, color_sc)
triangulation_2 = dealii.Triangulation('2D')
triangulation_2.generate_hyper_cube()
triangulation_2.refine_global(2)
triangulation_2.shift([2.,0.])
plot_triangulation(triangulation_2, color_sc)
flatten_triangulation_1 = dealii.Triangulation('2D')
triangulation.generate_hyper_cube()
triangulation.refine_global(2)
triangulation.flatten_triangulation(flatten_triangulation_1)
flatten_triangulation_2 = dealii.Triangulation('2D')
triangulation_2.flatten_triangulation(flatten_triangulation_2)
triangulation_3 = dealii.Triangulation('2D')
triangulation_3.merge_triangulations(flatten_triangulation_1, flatten_triangulation_2)
plot_triangulation(triangulation_3, color_sc)
flatten_triangulation_2.shift([-1.,0])
triangulation_3.merge_triangulations(flatten_triangulation_1, flatten_triangulation_2)
plot_triangulation(triangulation_3, color_sc)
triangulation_3.save('merged_triangulation')
for cell in triangulation.active_cells():
vertex = cell.get_vertex(5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We start by creating a 2D Triangulation of a hyper cube and we globally refine it twice. You can read the documentation of Triangulation by typing
Step2: Now we would like to visualize the mesh that has been created. We can output a vtu file using
Step3: The function below takes as input a Triangulation and a function that is used to define the color scheme. In this function, we loop over all the active cells, get the coordinates of the vertices, and use these coordinates to create polygons that we plot. We can loop over the active cells using
Step4: We now define a color scheme function and plot the Triangulation
Step5: Now let's assume that the left half of the domain is composed of a different material than the right half of the domain. We will loop over all the cells and, if the abscissa of the cell barycenter is less than 0.5, we will assign zero to the material_id of the cell. The other cells will be assigned a material_id of one.
Step6: We will refine isotropically the cells that have a material_id equal to zero and plot the Triangulation.
Step7: We will now show how to merge two Triangulations. In order to merge the two Triangulations, we will need to move (shift) the second Triangulation such that it doesn't overlap with the first one.
Step8: We are now almost ready to merge the Triangulations. However, deal.II does not allow us to merge Triangulations that have been refined. We can use the flatten_triangulation function to create new Triangulations that are not refined but this function does not work if the mesh contains hanging nodes. Thus, we need to modify the first triangulation.
Step9: As expected, the second Triangulation is moved too far to the right. This mistake can easily be fixed by moving flatten_triangulation_2 to the left and merging the Triangulations once again. Here we can see the advantage of using Python over C++: in C++ we would have to recompile and rerun the code, while in Python we can very easily fix our mistake.
Step10: Now that we are done generating the grid, we need to save it in a format that will make it easy to load in our C++ code. This can be done using the save function python and then using the Triangulation
Step11: The C++ code to load the mesh is
|
7,274
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np # we'll be using this shorthand for the NumPy library throughout
dataset = np.load("../data/images/project_data.npy")
n_samples = dataset.shape[0]
print("Data shape: ", dataset.shape)
import matplotlib.pyplot as plt
rows = 2
cols = 2
n_plots = rows*cols
fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=(10, 10 ))
for row in axs:
for ax in row:
        # One of Python's most wonderful attributes is that if an object is iterable it can be
        # directly iterated over, like above.
        # ax is an axis object from the 2d array of axis objects.
which = np.random.randint(0, n_samples)
ax.imshow(dataset[which].reshape(64, 64))
ax.axis("off")
from sklearn.model_selection import train_test_split
targets = np.load("../data/targets/project_targets.npy")
train_X, test_X, train_y, test_y = train_test_split(dataset, targets, test_size=0.15)
nonzero_indices = np.nonzero(train_X)
nonzero_elements = train_X[nonzero_indices]
print("Train Mean: ", nonzero_elements.mean())
print("Train Std.: ", nonzero_elements.std())
print("-------------")
print("Test Mean: ", test_X[np.nonzero(test_X)].mean())
print("Test Std.: ", test_X[np.nonzero(test_X)].std())
print("############")
nonzero_scaled = nonzero_elements/nonzero_elements.std()
train_X[nonzero_indices] = nonzero_scaled
test_X[np.nonzero(test_X)] /= nonzero_elements.std()
print("Train Mean: ", nonzero_scaled.mean())
print("Train Std.: ", nonzero_scaled.std())
print("-------------")
print("Test Mean: ", test_X[np.nonzero(test_X)].mean())
print("Test Std.: ", test_X[np.nonzero(test_X)].std())
import matplotlib.pyplot as plt
rows = 2
cols = 2
n_plots = rows*cols
fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=(10, 10 ))
for row in axs:
for ax in row:
        # One of Python's most wonderful attributes is that if an object is iterable it can be
        # directly iterated over, like above.
        # ax is an axis object from the 2d array of axis objects.
which = np.random.randint(0, train_X.shape[0])
ax.imshow(train_X[which].reshape(64, 64))
ax.text(5, 5, "{}".format(int(train_y[which])), bbox={'facecolor': 'white', 'pad': 10})
ax.axis("off")
from sklearn.preprocessing import OneHotEncoder
onehot_train_y = OneHotEncoder(sparse=False, categories="auto").fit_transform(train_y.reshape(-1, 1))
onehot_test_y = OneHotEncoder(sparse=False, categories="auto").fit_transform(test_y.reshape(-1, 1))
print("Onehot train targets:", onehot_train_y.shape)
print("Onehot test targets:",onehot_test_y.shape)
from keras.models import Sequential, Model
from keras.layers import Dense
from keras.regularizers import l2
from keras.optimizers import SGD, adam
flat_train_X = np.reshape(train_X, (train_X.shape[0], train_X.shape[1]*train_X.shape[2]*train_X.shape[3]))
flat_test_X = np.reshape(test_X, (test_X.shape[0], train_X.shape[1]*train_X.shape[2]*train_X.shape[3]))
logreg = Sequential()
logreg.add(Dense(2, kernel_regularizer=l2(0.01), activation="softmax"))
eta = 0.001
optimizer = SGD(eta)
logreg.compile(optimizer, loss="binary_crossentropy", metrics=["accuracy",])
history = logreg.fit(
x=flat_train_X,
y=onehot_train_y,
batch_size=100,
epochs=200,
validation_split=0.15,
verbose=2
)
model_config = {
"n_conv":2,
"receptive_fields":[3, 3],
"strides":[1, 1,],
"n_filters":[2, 2],
"conv_activation":[1, 1],
"max_pool":[1, 1],
"n_dense":1,
"neurons":[10,],
"dense_activation":[1,]
}
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, ReLU, Input, Softmax
from keras.regularizers import l2
def create_convolutional_neural_network_keras(input_shape, config, n_classes=2):
    """
    Modified from MH Jensen's course on machine learning in physics:
    https://github.com/CompPhysics/MachineLearningMSU/blob/master/doc/pub/CNN/ipynb/CNN.ipynb
    """
model=Sequential()
for i in range(config["n_conv"]):
receptive_field = config["receptive_fields"][i]
strides = config["strides"][i]
n_filters = config["n_filters"][i]
pad = "same" if i == 0 else "same"
input_shape = input_shape if i==0 else None
if i == 0:
conv = Conv2D(
n_filters,
(receptive_field, receptive_field),
input_shape=input_shape,
padding=pad,
strides=strides,
kernel_regularizer=l2(0.01)
)
else:
conv = Conv2D(
n_filters,
(receptive_field, receptive_field),
padding=pad,
strides=strides,
kernel_regularizer=l2(0.01)
)
model.add(conv)
pool = config["max_pool"][i]
activation = config["conv_activation"][i]
if activation:
model.add(ReLU())
if pool:
model.add(MaxPooling2D(2))
model.add(Flatten())
for i in range(config["n_dense"]):
n_neurons = config["neurons"][i]
model.add(
Dense(
n_neurons,
kernel_regularizer=l2(0.01)
))
activation = config["dense_activation"][i]
if activation:
model.add(ReLU())
model.add(
Dense(
n_classes,
activation='softmax',
kernel_regularizer=l2(0.01))
)
return model
model_o = create_convolutional_neural_network_keras(train_X.shape[1:], model_config, n_classes=2)
#model_o = mhj(train_X.shape[1:], 3, 2, 10, 2, 0.01)
print(model_o.summary())
from keras.utils import plot_model
plot_model(model_o, to_file="convnet.png")
eta = 0.01
sgd = SGD(lr=eta, )
adam = adam(lr=eta, beta_1=0.5, )
model_o.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
%matplotlib notebook
import matplotlib.pyplot as plt
history = model_o.fit(
x=train_X,
y=onehot_train_y,
batch_size=50,
epochs=40,
validation_split=0.15,
verbose=2
)
# copied from https://keras.io/visualization/
# Plot training & validation accuracy values
fig, axs= plt.subplots(figsize=(10, 8), nrows=2)
fig.suptitle('Model performance')
axs[0].plot(history.history['acc'], "x-",alpha=0.8)
axs[0].plot(history.history['val_acc'], "x-", alpha=0.8)
axs[0].set_ylabel('Accuracy')
axs[0].set_xlabel('Epoch')
axs[0].legend(['Train', 'Test'], loc='upper left')
# Plot training & validation loss values
axs[1].plot(history.history['loss'], "o-",alpha=0.8)
axs[1].plot(history.history['val_loss'], "o-", alpha=0.8)
axs[1].set_ylabel('Loss')
axs[1].legend(['Train', 'Test'], loc='upper left')
from keras.callbacks import EarlyStopping, ModelCheckpoint
callbacks = [EarlyStopping(min_delta=0.0001, patience=4), ModelCheckpoint("../checkpoints/ckpt")]
history = model_o.fit(
x=train_X,
y=onehot_train_y,
batch_size=50,
epochs=150,
validation_split=0.15,
verbose=2,
callbacks=callbacks
)
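
# As a quick sanity check we can score the trained network on the held-out test split prepared
# earlier; with metrics=['accuracy'] configured above, evaluate returns [loss, accuracy].
test_loss, test_acc = model_o.evaluate(test_X, onehot_test_y, verbose=0)
print("Test accuracy: {:.3f}".format(test_acc))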
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Task 1b
Step3: Task 1c
Step5: We also want to plot the data again to confirm that our scaling is sensible; you should reuse your code from above for this.
Step6: 1d
Step7: Prelude to task 2
Step9: The logistic regression model doesn't work, clearly. What about the data prohibits it from doing so?
Step10: 2b
Step11: 2b
Step12: 2c
Step13: 3a
|
7,275
|
<ASSISTANT_TASK:>
Python Code:
# Setting extend, grid and compile
# Setting the extent
sandstone = GeMpy_core.GeMpy()
# Create Data class with raw data
sandstone.import_data( 696000,747000,6863000,6950000,-20000, 2000,
path_f = os.pardir+"/input_data/a_Foliations.csv",
path_i = os.pardir+"/input_data/a_Points.csv")
sandstone.Data.set_series({"EarlyGranite_Series":sandstone.Data.formations[-1],
"BIF_Series":(sandstone.Data.formations[0], sandstone.Data.formations[1]),
"SimpleMafic_Series":sandstone.Data.formations[2]},
order = ["EarlyGranite_Series",
"BIF_Series",
"SimpleMafic_Series"])
sandstone.Data.Foliations;
# Create a class Grid so far just regular grid
sandstone.create_grid()
sandstone.Grid.grid
sandstone.Plot.plot_data(serie = sandstone.Data.series.columns.values[1])
sandstone.set_interpolator()
sandstone.Plot.plot_potential_field(10, n_pf=0)
sandstone.Plot.plot_potential_field(10, n_pf=1, cmap = "magma", plot_data = False)
sandstone.Plot.plot_potential_field(10, n_pf=2)
# Reset the block
sandstone.Interpolator.block.set_value(np.zeros_like(sandstone.Grid.grid[:,0]))
# Compute the block
sandstone.Interpolator.compute_block_model([0,1,2], verbose = 1)
sandstone.Plot.plot_block_section()
"""
Export model to VTK

Export the geology blocks to VTK for visualisation of the entire 3-D model in an
external VTK viewer, e.g. Paraview.

.. note:: Requires pyevtk, available for free on: https://github.com/firedrakeproject/firedrake/tree/master/python/evtk

**Optional keywords**:
    - *vtk_filename* = string : filename of VTK file (default: output_name)
    - *data* = np.array : data array to export to VTK (default: entire block model)
"""
vtk_filename = "noddyFunct2"
extent_x = 10
extent_y = 10
extent_z = 10
delx = 0.2
dely = 0.2
delz = 0.2
from pyevtk.hl import gridToVTK
# Coordinates
x = np.arange(0, extent_x + 0.1*delx, delx, dtype='float64')
y = np.arange(0, extent_y + 0.1*dely, dely, dtype='float64')
z = np.arange(0, extent_z + 0.1*delz, delz, dtype='float64')
# self.block = np.swapaxes(self.block, 0, 2)
gridToVTK(vtk_filename, x, y, z, cellData = {"geology" : sol})
%%timeit
sol = interpolator.geoMigueller(dips,dips_angles,azimuths,polarity, rest, ref)[0]
sandstone.block_export.profile.summary()
%%timeit
# Reset the block
sandstone.block.set_value(np.zeros_like(sandstone.grid[:,0]))
# Compute the block
sandstone.compute_block_model([0,1,2], verbose = 0)
sandstone.block_export.profile.summary()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: All input data is stored in pandas dataframes under self.Data.Interfaces and self.Data.Foliations
Step2: Plotting raw data
Step3: Class Interpolator
Step4: Early granite
Step5: BIF Series
Step6: SImple mafic
Step7: Optimizing the export of lithologies
Step9: Export vtk
Step10: Performance Analysis
Step11: GPU
|
7,276
|
<ASSISTANT_TASK:>
Python Code:
# Specifically for the iPython Notebook environment for clearing output.
from IPython.display import clear_output
# Global variables
board = [' '] * 10
game_state = True
announce = ''
# Note: Game will ignore the 0 index
def reset_board():
global board,game_state
board = [' '] * 10
game_state = True
def display_board():
''' This function prints out the board so the numpad can be used as a reference '''
# Clear current cell output
clear_output()
# Print board
print " "+board[7]+" |"+board[8]+" | "+board[9]+" "
print "------------"
print " "+board[4]+" |"+board[5]+" | "+board[6]+" "
print "------------"
print " "+board[1]+" |"+board[2]+" | "+board[3]+" "
def win_check(board, player):
''' Check Horizontals,Verticals, and Diagonals for a win '''
if (board[7] == board[8] == board[9] == player) or \
(board[4] == board[5] == board[6] == player) or \
(board[1] == board[2] == board[3] == player) or \
(board[7] == board[4] == board[1] == player) or \
(board[8] == board[5] == board[2] == player) or \
(board[9] == board[6] == board[3] == player) or \
(board[1] == board[5] == board[9] == player) or \
(board[3] == board[5] == board[7] == player):
return True
else:
return False
def full_board_check(board):
''' Function to check if any remaining blanks are in the board '''
if " " in board[1:]:
return False
else:
return True
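# Note: index 0 of the board is never used, which is why only board[1:] is scanned for blanks.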
def ask_player(mark):
''' Asks player where to place X or O mark, checks validity '''
global board
req = 'Choose where to place your: ' + mark
while True:
try:
choice = int(raw_input(req))
except ValueError:
print("Sorry, please input a number between 1-9.")
continue
if choice not in range(1,10):
print("Sorry, please input a number between 1-9.")
continue
if board[choice] == " ":
board[choice] = mark
break
else:
print "That space isn't empty!"
continue
def player_choice(mark):
global board,game_state,announce
#Set game blank game announcement
announce = ''
#Get Player Input
mark = str(mark)
# Validate input
ask_player(mark)
#Check for player win
if win_check(board,mark):
clear_output()
display_board()
announce = mark +" wins! Congratulations"
game_state = False
#Show board
clear_output()
display_board()
#Check for a tie
if full_board_check(board):
announce = "Tie!"
game_state = False
return game_state,announce
def play_game():
reset_board()
global announce
# Set marks
X='X'
O='O'
while True:
# Show board
clear_output()
display_board()
# Player X turn
game_state,announce = player_choice(X)
print announce
if game_state == False:
break
# Player O turn
game_state,announce = player_choice(O)
print announce
if game_state == False:
break
# Ask player for a rematch
rematch = raw_input('Would you like to play again? y/n')
if rematch == 'y':
play_game()
else:
print "Thanks for playing!"
play_game()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next make a function that will reset the board; in this case we'll store values as a list.
Step2: Now create a function to display the board; I'll use the num pad as the board reference.
Step3: Define a function to check for a win by comparing inputs in the board list. Note
Step4: Define function to check if the board is already full in case of a tie. (This is straightforward with our board stored as a list)
Step5: Now define a function to get player input and do various checks on it.
Step6: Now have a function that takes in the player's choice (via the ask_player function) and then returns the game_state.
Step7: Finally put it all together in a function to play the game.
Step8: Let's play!
|
7,277
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
image_train = graphlab.SFrame('image_train_data/')
# deep_learning_model = graphlab.load_model('http://s3.amazonaws.com/GraphLab-Datasets/deeplearning/imagenet_model_iter45')
# image_train['deep_features'] = deep_learning_model.extract_features(image_train)
image_train.head()
knn_model = graphlab.nearest_neighbors.create(image_train,
features=['deep_features'],
label='id')
graphlab.canvas.set_target('ipynb')
cat = image_train[18:19]
cat['image'].show()
knn_model.query(cat)
def get_images_from_ids(query_result):
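    # Keep only the rows of image_train whose 'id' appears among the query's reference labels.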
return image_train.filter_by(query_result['reference_label'],'id')
cat_neighbors = get_images_from_ids(knn_model.query(cat))
cat_neighbors['image'].show()
car = image_train[8:9]
car['image'].show()
get_images_from_ids(knn_model.query(car))['image'].show()
show_neighbors = lambda i: get_images_from_ids(knn_model.query(image_train[i:i+1]))['image'].show()
show_neighbors(8)
show_neighbors(26)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the CIFAR-10 dataset
Step2: Computing deep features for our images
Step3: Train a nearest-neighbors model for retrieving images using deep features
Step4: Use image retrieval model with deep features to find similar images
Step5: We are going to create a simple function to view the nearest neighbors to save typing
Step6: Very cool results showing similar cats.
Step7: Just for fun, let's create a lambda to find and show nearest neighbor images
|
7,278
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import print_function
import emcee
import triangle
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
# Reproducible results!
np.random.seed(123)
# Choose the "true" parameters.
m_true = -0.9594
b_true = 4.294
f_true = 0.534
# Generate some synthetic data from the model.
N = 50
x = np.sort(10*np.random.rand(N))
yerr = 0.1+0.5*np.random.rand(N)
y = m_true*x+b_true
y += np.abs(f_true*y) * np.random.randn(N)
y += yerr * np.random.randn(N)
# Define the probability function as likelihood * prior.
def lnprior(theta):
m, b, lnf = theta
if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
return 0.0
return -np.inf
def lnlike(theta, x, y, yerr):
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
# Find the maximum likelihood value.
chi2 = lambda *args: -2 * lnlike(*args)
result = op.minimize(chi2, [m_true, b_true, np.log(f_true)], args=(x, y, yerr))
# Set up the sampler.
ntemps, ndim, nwalkers = 5, 3, 100
pos = [[result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
for j in range(ntemps)]
sampler = emcee.PTSampler(ntemps, nwalkers, ndim, lnlike, lnprior, loglargs=(x, y, yerr))
sampler.parallel = True
N = 200
sampler.reset()
out = sampler.run_mcmc(pos, N)
sampler.chain.shape
sampler.thermodynamic_integration_log_evidence()
class sampler_data():
def __init__(self, chain, lnlikelihood, betas):
self.chain = chain
self.lnlikelihood = lnlikelihood
self.betas = betas
sampler_data.thermodynamic_integration_log_evidence = emcee.PTSampler.thermodynamic_integration_log_evidence.im_func
sampler2 = sampler_data(sampler.chain, sampler.lnlikelihood, sampler.betas)
sampler2.thermodynamic_integration_log_evidence()
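# As with the original sampler, this returns a (log-evidence, error estimate) tuple computed
# purely from the stored chains, log-likelihoods and betas.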
import os,sys,time
import numpy as np
from IPython import parallel
rc = parallel.Client()
dview = rc[:]
dview.block = True
dview.use_dill() # Standard pickle fails with functions
dview.execute("import numpy as np")
print(len(dview))
def run(dview, sampler, **kwargs):
    """
    Run the sampler on all instances of the dview.

    This spreads the total workload over the n views. The basic idea is to
    reduce the number of walkers on each view in inverse proportion to the
    number of views that we have. So, it will reduce the time for any simulation
    by a factor 1/nviews while still producing equivalent results.
    """
nviews = len(dview)
for key, val in kwargs.items():
dview[key] = val
def MakeEven(j):
if j % 2 == 0:
return j
else:
return j+1
nwalkers_list = [MakeEven(int(sampler.nwalkers/nviews))
for j in range(len(dview)-1)]
nwalkers_list.append(MakeEven(sampler.nwalkers - sum(nwalkers_list)))
print("Setting up walkers on each machine:\n"
"Machine: Walkers")
for i, nwalkers in enumerate(nwalkers_list):
print("{} : {}".format(i, nwalkers))
dview.push(dict(nwalkers=nwalkers, ID=i), targets=i)
print("Total: {}".format(sum(nwalkers_list)))
dview['sampler'] = sampler
dview.execute(
"sampler.nwalkers = nwalkers\n"
"sampler.reset()\n"
"pos = np.array(pos)[:, ID*nwalkers:(ID+1)*nwalkers, :]\n"
"sampler.run_mcmc(pos, nsteps)\n"
"chain = sampler.chain[:, :, :, :]\n"
"lnlikelihood = sampler.lnlikelihood[:, :, :]\n")
chain = np.concatenate(dview.get("chain"), axis=1)
lnlikelihood = np.concatenate(dview.get("lnlikelihood"), axis=1)
return sampler_data(chain, lnlikelihood, sampler.betas)
nsteps=200
sampler3 = emcee.PTSampler(ntemps, nwalkers, ndim, lnlike, lnprior, loglargs=(x, y, yerr))
samplerD = run(dview, sampler3, pos=pos, nsteps=nsteps)
print(samplerD.chain.shape, samplerD.lnlikelihood.shape, samplerD.betas.shape)
print(sampler.chain.shape, sampler.lnlikelihood.shape, sampler.betas.shape)
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3)
ax1.plot(samplerD.chain[0, :, :, 0], lw=0.1, color="k")
ax2.plot(samplerD.chain[0, :, :, 1], lw=0.1, color="k")
ax3.plot(samplerD.chain[0, :, :, 2], lw=0.1, color="k")
plt.show()
triangle.corner(samplerD.chain[0, :, 100:, :].reshape((-1, ndim)))
plt.show()
samplerD.thermodynamic_integration_log_evidence()
npoints = 1000
burnin = 100
sampler.reset()
%timeit out = sampler.run_mcmc(pos, npoints, rstate0=np.random.get_state())
npoints = 1000
burnin = 100
sampler3.reset()
%timeit samplerD = run(dview, sampler3, pos=pos, nsteps=npoints, burnin=burnin, ndim=ndim, ntemps=ntemps, lnprior=lnprior, lnlike=lnlike)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It is this sampler which has all the methods and functionality that we want. For example, let's run it and see the size of the output chains.
Step2: Create a new sampler object
Step3: This limits us to only the functionality that we have provided here.
Step4: Then we can call the method and get the answer
Step5: The use of this is that once we have run our MCMC simulations in parallel, we gather together all the attributes such as the chains and betas, then pass them into a single new object. This object is passed on to the post-processing scripts, which don't care whether it was created using a single machine or several!
Step7: Now create a run function to wrap up the process. This splits the nwalkers over the nviews, then adds them back together so that in the end we get the same number of walkers etc. as we would without using parallelisation.
Step8: and now we run it
Step9: We want to check that the sampler attributes are the same as those run without the parallelisation
Step10: Timing the methods
|
7,279
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
num_channels = 1 # grayscale
import numpy as np
def reformat(dataset, labels):
dataset = dataset.reshape(
(-1, image_size, image_size, num_channels)).astype(np.float32)
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
batch_size = 16
patch_size = 5
depth = 16
num_hidden = 64
graph = tf.Graph()
with graph.as_default():
# Input data.
tf_train_dataset = tf.placeholder(
tf.float32, shape=(batch_size, image_size, image_size, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
layer1_weights = tf.Variable(tf.truncated_normal(
[patch_size, patch_size, num_channels, depth], stddev=0.1))
layer1_biases = tf.Variable(tf.zeros([depth]))
layer2_weights = tf.Variable(tf.truncated_normal(
[patch_size, patch_size, depth, depth], stddev=0.1))
layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
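  # The two stride-2 'SAME' convolutions in model() below halve each spatial dimension twice,
  # which is why the first fully connected layer expects image_size // 4 * image_size // 4 * depth inputs.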
layer3_weights = tf.Variable(tf.truncated_normal(
[image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))
layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
layer4_weights = tf.Variable(tf.truncated_normal(
[num_hidden, num_labels], stddev=0.1))
layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))
# Model.
def model(data):
conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')
hidden = tf.nn.relu(conv + layer1_biases)
conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')
hidden = tf.nn.relu(conv + layer2_biases)
shape = hidden.get_shape().as_list()
reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
return tf.matmul(hidden, layer4_weights) + layer4_biases
# Training computation.
logits = model(tf_train_dataset)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 1001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print('Initialized')
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 50 == 0):
print('Minibatch loss at step %d: %f' % (step, l))
print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reformat into a TensorFlow-friendly shape
Step2: Let's build a small network with two convolutional layers, followed by one fully connected layer. Convolutional networks are more expensive computationally, so we'll limit its depth and number of fully connected nodes.
|
7,280
|
<ASSISTANT_TASK:>
Python Code:
# Get http://geneontology.org/ontology/go-basic.obo
from goatools.base import download_go_basic_obo
obo_fname = download_go_basic_obo()
# Get ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz
from goatools.base import download_ncbi_associations
gene2go = download_ncbi_associations()
from goatools.obo_parser import GODag
obodag = GODag("go-basic.obo")
from goatools.rpt_lev_depth import RptLevDepth
rptobj = RptLevDepth(obodag)
rptobj.write_summary_cnts_all()
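# Writes a table of GO term counts at each level and depth, broken down by namespace (BP, MF, CC).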
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Download Associations, if necessary
Step2: 3. Initialize GODag object
Step3: 4. Initialize Reporter class
Step4: 5. Generate depth/level report for all GO terms
|
7,281
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Image
from IPython.html.widgets import interact, interactive, fixed
Image('fermidist.png')
def fermidist(energy, mu, kT):
    """Compute the Fermi distribution at energy, mu and kT."""
F = 1/(np.exp((energy-mu)/kT)+1)
return F
assert np.allclose(fermidist(0.5, 1.0, 10.0), 0.51249739648421033)
assert np.allclose(fermidist(np.linspace(0.0,1.0,10), 1.0, 10.0),
np.array([ 0.52497919, 0.5222076 , 0.51943465, 0.5166605 , 0.51388532,
0.51110928, 0.50833256, 0.50555533, 0.50277775, 0.5 ]))
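# A useful special case: when the energy equals mu the occupation is exactly 1/2 for any kT,
# e.g. fermidist(1.0, 1.0, 0.5) == 0.5.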
def plot_fermidist(mu, kT):
e = np.linspace(0,10.0,100)
fermdata = fermidist(e,mu,kT)
f = plt.figure(figsize=(10,7))
plt.plot(e,fermdata, color='red')
plt.xlim(0,10)
plt.ylim(0,1)
plt.ylabel('Fermi distribution')
plt.xlabel('single particle energy')
plt.title('Fermi distribution vs. single particle energy')
plt.tick_params(top=False,right=False, direction = 'out')
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plot_fermidist(4.0, 1.0)
assert True # leave this for grading the plot_fermidist function
interact(plot_fermidist, mu=(0.0,5.0,0.1), kT=(0.1,10.0,0.1));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the Fermi distribution
Step3: In this equation
Step4: Write a function plot_fermidist(mu, kT) that plots the Fermi distribution $F(\epsilon)$ as a function of $\epsilon$ as a line plot for the parameters mu and kT.
Step5: Use interact with plot_fermidist to explore the distribution
|
7,282
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
Isess = tf.InteractiveSession()
m1 = [[1.0, 2.0], [3.0, 4.0]] #list
m2 = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) #numpy ndarray
m3 = tf.constant([[1.0, 2.0], [3.0, 4.0]]) #Tensor constant object
print(type(m1))
print(type(m2))
print(type(m3))
t1 = tf.convert_to_tensor(m1, dtype=tf.float32)
print(type(t1))
m4 = tf.constant([ [1,2], [3,4] ])
print(m4)
tf.ones([3,3]) * 0.5
x = tf.constant([1.,2.])
print(x)
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
result = sess.run(tf.negative(x))
print(result)
sess.close()
# sess = tf.InteractiveSession() #start Session in interactive mode
raw_data = [1., 3., -5., -4., 0., 3., 9.]
spike = tf.Variable(False) #create a Boolean tf variable
spike.initializer.run() #all tf variables must be initialized
for i in range(1, len(raw_data)):
if raw_data[i] - raw_data[i-1] > 3:
updater = tf.assign(spike, True) #update variable with assign(varName, value)
updater.eval() #evaluate variable to see the change
else:
tf.assign(spike, False).eval()
print("Spike", spike.eval())
# sess.close()
raw_data = [1., 3., -5., -4., 0., 3., 9.]
spikes = tf.Variable([False] * len(raw_data), name='spikes') #create vector of boolean variables
spikes.initializer.run() #and initialize
saver = tf.train.Saver() #saves and restores variables (all by default)
for i in range(1, len(raw_data)):
if raw_data[i] - raw_data[i-1] > 3:
spikes_val = spikes.eval()
spikes_val[i] = True
updater = tf.assign(spikes, spikes_val) #update variable with assign(varName, value)
updater.eval() #evaluate variable to see the change
save_path = saver.save(Isess, './spikes.ckpt') #saves variables to disk
print("spikes data saved in file: %s" % save_path)
%ls spike*
spikes = tf.Variable([False]*7, name='spikes')
# spikes.initializer.run() #don't need to init, they'll be directly loaded
saver = tf.train.Saver()
saver.restore(Isess, "./spikes.ckpt")
print(spikes.eval())
raw_data = np.random.normal(10, 1, 100)
alpha = tf.constant(0.05)
curr_value = tf.placeholder(tf.float32)
prev_avg = tf.Variable(0.)
update_avg = alpha * curr_value + (1 - alpha) * prev_avg
### Create Summary Nodes for TensorBoard ###
avg_hist = tf.summary.scalar("running_average", update_avg)
value_hist = tf.summary.scalar("incoming_values", curr_value)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs")
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for i in range(len(raw_data)):
summary_str, curr_avg = sess.run([merged, update_avg], feed_dict={curr_value: raw_data[i]})
sess.run(tf.assign(prev_avg, curr_avg))
print(raw_data[i], curr_avg)
writer.add_summary(summary_str, i)
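        # After the loop finishes, the logged summaries can be inspected with TensorBoard, e.g.:
        #   tensorboard --logdir=./logs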
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Representing Tensors
Step2: Tensorflow Operators
Step3: Sessions can take placeholders, variables, and constants as input
Step4: saver.save() saves a compact binary version of the variables in 'ckpt' files. They can only be read by using the saver.restore() function
|
7,283
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import sys
sys.path.append("/home/darlan/cvs_files/pyphysim/")
# xxxxxxxxxx Import Statements xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from pyphysim.simulations.runner import SimulationRunner
from pyphysim.simulations.parameters import SimulationParameters
from pyphysim.simulations.results import SimulationResults, Result
from pyphysim.comm import modulators, channels
from pyphysim.util.conversion import dB2Linear
from pyphysim.util import misc
import numpy as np
from pprint import pprint
from apps.simulate_comp import plot_spectral_efficience_all_metrics, plot_per_all_metrics
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx333
results_filename_rank_1 = 'bd_results_2x2_ext_int_rank_1'
results_filename_rank_2 = 'bd_results_2x2_ext_int_rank_2'
results_rank_1 = SimulationResults.load_from_file('{0}{1}'.format(results_filename_rank_1, '.pickle'))
SNR_rank_1 = results_rank_1.params['SNR']
results_rank_2 = SimulationResults.load_from_file('{0}{1}'.format(results_filename_rank_2, '.pickle'))
SNR_rank_2 = results_rank_2.params['SNR']
Pe_dBm = 10
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(20,7))
fig = plot_spectral_efficience_all_metrics(results_rank_1, Pe_dBm, ax[0])
fig = plot_per_all_metrics(results_rank_1, Pe_dBm, ax[1])
Pe_dBm = 0
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(20,7))
fig = plot_spectral_efficience_all_metrics(results_rank_1, Pe_dBm, ax[0])
fig = plot_per_all_metrics(results_rank_1, Pe_dBm, ax[1])
Pe_dBm = -10
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(20,7))
fig = plot_spectral_efficience_all_metrics(results_rank_1, Pe_dBm, ax[0])
fig = plot_per_all_metrics(results_rank_1, Pe_dBm, ax[1])
Pe_dBm = 10
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(20,7))
fig = plot_spectral_efficience_all_metrics(results_rank_2, Pe_dBm, ax[0])
fig = plot_per_all_metrics(results_rank_2, Pe_dBm, ax[1])
Pe_dBm = 0
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(20,7))
fig = plot_spectral_efficience_all_metrics(results_rank_2, Pe_dBm, ax[0])
fig = plot_per_all_metrics(results_rank_2, Pe_dBm, ax[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we import some modules we use and add the PyPhysim to the python path.
Step2: Load the results from disk
Step3: Results for external interference of 10dBm (rank1)
Step4: Results for external interference of 0dBm (rank 1)
Step5: Results for external interference of -10dBm (rank 1)
Step6: Results for external interference of 10dBm (rank2)
Step7: Results for external interference of 0dBm (rank 2)
|
7,284
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from collections import defaultdict
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import rcParams
import matplotlib.cm as cm
import matplotlib as mpl
#colorbrewer2 Dark2 qualitative color table
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843)]
rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
rcParams['axes.facecolor'] = 'white'
rcParams['font.size'] = 14
rcParams['patch.edgecolor'] = 'white'
rcParams['patch.facecolor'] = dark2_colors[0]
rcParams['font.family'] = 'StixGeneral'
def remove_border(axes=None, top=False, right=False, left=True, bottom=True):
Minimize chartjunk by stripping out unnecessary plot borders and axis ticks
The top/right/left/bottom keywords toggle whether the corresponding plot border is drawn
ax = axes or plt.gca()
ax.spines['top'].set_visible(top)
ax.spines['right'].set_visible(right)
ax.spines['left'].set_visible(left)
ax.spines['bottom'].set_visible(bottom)
#turn off all ticks
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('none')
#now re-enable visibles
if top:
ax.xaxis.tick_top()
if bottom:
ax.xaxis.tick_bottom()
if left:
ax.yaxis.tick_left()
if right:
ax.yaxis.tick_right()
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
#this mapping between states and abbreviations will come in handy later
states_abbrev = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
#adapted from https://github.com/dataiap/dataiap/blob/master/resources/util/map_util.py
#load in state geometry
state2poly = defaultdict(list)
data = json.load(file("data/us-states.json"))
for f in data['features']:
state = states_abbrev[f['id']]
geo = f['geometry']
if geo['type'] == 'Polygon':
for coords in geo['coordinates']:
state2poly[state].append(coords)
elif geo['type'] == 'MultiPolygon':
for polygon in geo['coordinates']:
state2poly[state].extend(polygon)
def draw_state(plot, stateid, **kwargs):
draw_state(plot, stateid, color=..., **kwargs)
Automatically draws a filled shape representing the state in
subplot.
The color keyword argument specifies the fill color. It accepts keyword
arguments that plot() accepts
for polygon in state2poly[stateid]:
xs, ys = zip(*polygon)
plot.fill(xs, ys, **kwargs)
def make_map(states, label):
Draw a choropleth map that maps data onto the United States
Inputs
-------
states : Column of a DataFrame
The value for each state, to display on a map
label : str
Label of the color bar
Returns
--------
The map
fig = plt.figure(figsize=(12, 9))
ax = plt.gca()
if states.max() < 2: # colormap for election probabilities
cmap = cm.RdBu
vmin, vmax = 0, 1
else: # colormap for electoral votes
cmap = cm.binary
vmin, vmax = 0, states.max()
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
skip = set(['National', 'District of Columbia', 'Guam', 'Puerto Rico',
'Virgin Islands', 'American Samoa', 'Northern Mariana Islands'])
for state in states_abbrev.values():
if state in skip:
continue
color = cmap(norm(states.ix[state]))
draw_state(ax, state, color = color, ec='k')
#add an inset colorbar
ax1 = fig.add_axes([0.45, 0.70, 0.4, 0.02])
cb1=mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=norm,
orientation='horizontal')
ax1.set_title(label)
remove_border(ax, left=False, bottom=False)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(-180, -60)
ax.set_ylim(15, 75)
return ax
# We are pretending to build our model 1 month before the election
import datetime
today = datetime.datetime(2012, 10, 2)
today
electoral_votes = pd.read_csv("data/electoral_votes.csv").set_index('State')
electoral_votes.head()
make_map(electoral_votes.Votes, "Electoral Votes");
predictwise = pd.read_csv('data/predictwise.csv').set_index('States')
predictwise.head()
#your code here
make_map(predictwise.Obama, "P(Obama): PredictWise")
Function
--------
simulate_election
Inputs
------
model : DataFrame
A DataFrame summarizing an election forecast. The dataframe has 51 rows -- one for each state and DC
It has the following columns:
Obama : Forecasted probability that Obama wins the state
Votes : Electoral votes for the state
The DataFrame is indexed by state (i.e., model.index is an array of state names)
n_sim : int
Number of simulations to run
Returns
-------
results : Numpy array with n_sim elements
Each element stores the number of electoral college votes Obama wins in each simulation.
#Your code here
def simulate_election(model, n_sim):
#each column simulates a single outcome from the 50 states + DC
#Obama wins the simulation if the random number is < the win probability
simulations = np.random.uniform(size=(51, n_sim))
obama_votes = (simulations < model.Obama.values.reshape(-1, 1)) * model.Votes.values.reshape(-1, 1)
#summing over rows gives the total electoral votes for each simulation
return obama_votes.sum(axis=0)
result = simulate_election(predictwise, 10000)
#compute the probability of an Obama win, given this simulation
#Your code here
print (result >= 269).mean()
Function
--------
plot_simulation
Inputs
------
simulation: Numpy array with n_sim (see simulate_election) elements
Each element stores the number of electoral college votes Obama wins in each simulation.
Returns
-------
Nothing
#your code here
def plot_simulation(simulation):
plt.hist(simulation, bins=np.arange(200, 538, 1),
label='simulations', align='left', normed=True)
plt.axvline(332, 0, .5, color='r', label='Actual Outcome')
plt.axvline(269, 0, .5, color='k', label='Victory Threshold')
p05 = np.percentile(simulation, 5.)
p95 = np.percentile(simulation, 95.)
iq = int(p95 - p05)
pwin = ((simulation >= 269).mean() * 100)
plt.title("Chance of Obama Victory: %0.2f%%, Spread: %d votes" % (pwin, iq))
plt.legend(frameon=False, loc='upper left')
plt.xlabel("Obama Electoral College Votes")
plt.ylabel("Probability")
remove_border()
plot_simulation(result)
result
gallup_2012=pd.read_csv("data/g12.csv").set_index('State')
gallup_2012["Unknown"] = 100 - gallup_2012.Democrat - gallup_2012.Republican
gallup_2012.head()
Function
--------
simple_gallup_model
A simple forecast that predicts an Obama (Democratic) victory with
0 or 100% probability, depending on whether a state
leans Republican or Democrat.
Inputs
------
gallup : DataFrame
The Gallup dataframe above
Returns
-------
model : DataFrame
A dataframe with the following column
* Obama: probability that the state votes for Obama. All values should be 0 or 1
model.index should be set to gallup.index (that is, it should be indexed by state name)
Examples
---------
>>> simple_gallup_model(gallup_2012).ix['Florida']
Obama 1
Name: Florida, dtype: float64
>>> simple_gallup_model(gallup_2012).ix['Arizona']
Obama 0
Name: Arizona, dtype: float64
#your code here
def simple_gallup_model(gallup):
return pd.DataFrame(dict(Obama=(gallup.Dem_Adv > 0).astype(float)))
model = simple_gallup_model(gallup_2012)
model = model.join(electoral_votes)
prediction = simulate_election(model, 10000)
plt.xlim(200,400)
plot_simulation(prediction)
plt.show()
make_map(model.Obama, "P(Obama): Simple Model")
Function
--------
uncertain_gallup_model
A forecast that predicts an Obama (Democratic) victory if the random variable drawn
from a Gaussian with mean Dem_Adv and standard deviation 3% is >0
Inputs
------
gallup : DataFrame
The Gallup dataframe above
Returns
-------
model : DataFrame
A dataframe with the following column
* Obama: probability that the state votes for Obama.
model.index should be set to gallup.index (that is, it should be indexed by state name)
# your code here
from scipy.special import erf
def uncertain_gallup_model(gallup):
sigma = 3
prob = .5 * (1 + erf(gallup.Dem_Adv / np.sqrt(2 * sigma**2)))
return pd.DataFrame(dict(Obama=prob), index=gallup.index)
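# The expression above is the Gaussian CDF: for X ~ N(Dem_Adv, sigma^2),
# P(Obama wins the state) = P(X > 0) = 0.5 * (1 + erf(Dem_Adv / (sigma * sqrt(2)))).
# For example, a 3-point Democratic advantage with sigma = 3 gives
# 0.5 * (1 + erf(1 / sqrt(2))) ~ 0.84.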
model = uncertain_gallup_model(gallup_2012)
model = model.join(electoral_votes)
make_map(model.Obama, "P(Obama): Gallup + Uncertainty")
plt.show()
prediction = simulate_election(model, 10000)
plot_simulation(prediction)
Function
--------
biased_gallup_poll
Subtracts a fixed amount from Dem_Adv, before computing the uncertain_gallup_model.
This simulates correcting a hypothetical bias towards Democrats
in the original Gallup data.
Inputs
-------
gallup : DataFrame
The Gallup party affiliation data frame above
bias : float
The amount by which to shift each prediction
Examples
--------
>>> model = biased_gallup(gallup, 1.)
>>> model.ix['Florida']
>>> .460172
#your code here
def biased_gallup(gallup, bias):
g2 = gallup.copy()
g2.Dem_Adv -= bias
return uncertain_gallup_model(g2)
#your code here
model = biased_gallup(gallup_2012, 1)
model = model.join(electoral_votes)
prediction = simulate_election(model, 10000)
plot_simulation(prediction)
plt.show()
model = biased_gallup(gallup_2012, 5)
model = model.join(electoral_votes)
prediction = simulate_election(model, 10000)
plot_simulation(prediction)
plt.show()
gallup_08 = pd.read_csv("data/g08.csv").set_index('State')
results_08 = pd.read_csv('data/2008results.csv').set_index('State')
prediction_08 = gallup_08[['Dem_Adv']]
prediction_08['Dem_Win']=results_08["Obama Pct"] - results_08["McCain Pct"]
prediction_08.head()
#your code here
plt.plot(prediction_08.Dem_Adv, prediction_08.Dem_Win, 'o')
plt.xlabel("2008 Gallup Democrat Advantage")
plt.ylabel("2008 Election Democrat Win")
fit = np.polyfit(prediction_08.Dem_Adv, prediction_08.Dem_Win, 1)
x = np.linspace(-40, 80, 10)
y = np.polyval(fit, x)
plt.plot(x, y)
print fit
#your code here
prediction_08[(prediction_08.Dem_Win < 0) & (prediction_08.Dem_Adv > 0)]
print (prediction_08.Dem_Adv - prediction_08.Dem_Win).mean()
#your code here
model = biased_gallup(gallup_2012, 8.06)
model = model.join(electoral_votes)
prediction = simulate_election(model, 10000)
plot_simulation(prediction)
national_results=pd.read_csv("data/nat.csv")
national_results.set_index('Year',inplace=True)
national_results.head()
polls04=pd.read_csv("data/p04.csv")
polls04.State=polls04.State.replace(states_abbrev)
polls04.set_index("State", inplace=True);
polls04.head()
pvi08=polls04.Dem - polls04.Rep - (national_results.xs(2004)['Dem'] - national_results.xs(2004)['Rep'])
pvi08.head()
#your code here
e2008=pd.DataFrame(dict(pvi=pvi08, Dem_Win = prediction_08.Dem_Win, Dem_Adv=prediction_08.Dem_Adv-prediction_08.Dem_Adv.mean()))
e2008['obama_win']=1*(prediction_08.Dem_Win > 0)
e2008 = e2008.sort_index()
e2008.head()
pvi12 = e2008.Dem_Win - (national_results.xs(2008)['Dem'] - national_results.xs(2008)['Rep'])
e2012 = pd.DataFrame(dict(pvi=pvi12, Dem_Adv=gallup_2012.Dem_Adv - gallup_2012.Dem_Adv.mean()))
e2012 = e2012.sort_index()
e2012.head()
results2012 = pd.read_csv("data/2012results.csv")
results2012.set_index("State", inplace=True)
results2012 = results2012.sort_index()
results2012.head()
#your code here
plt.plot(e2008.pvi, e2012.pvi, 'o', label='Data')
fit = np.polyfit(e2008.pvi, e2012.pvi, 1)
x = np.linspace(-40, 80, 10)
y = np.polyval(fit, x)
plt.plot(x, x, '--k', alpha=.3, label='x=y')
plt.plot(x, y, label='Linear fit')
plt.xlabel("2004 PVI")
plt.ylabel("2008 PVI")
plt.legend(loc='upper left')
#your code here
plt.xlabel("Gallup Democrat Advantage (from mean)")
plt.ylabel("pvi")
colors=["red","blue"]
ax=plt.gca()
for label in [0, 1]:
color = colors[label]
mask = e2008.obama_win == label
l = '2008 McCain States' if label == 0 else '2008 Obama States'
ax.scatter(e2008[mask]['Dem_Adv'], e2008[mask]['pvi'], c=color, s=60, label=l)
ax.scatter(e2012['Dem_Adv'], e2012['pvi'], c='gray', s=60,
marker="s", label='2012 States', alpha=.3)
plt.legend(frameon=False, scatterpoints=1, loc='upper left')
remove_border()
from sklearn.linear_model import LogisticRegression
def prepare_features(frame2008, featureslist):
y= frame2008.obama_win.values
X = frame2008[featureslist].values
if len(X.shape) == 1:
X = X.reshape(-1, 1)
return y, X
def fit_logistic(frame2008, frame2012, featureslist, reg=0.0001):
y, X = prepare_features(frame2008, featureslist)
clf2 = LogisticRegression(C=reg)
clf2.fit(X, y)
X_new = frame2012[featureslist]
obama_probs = clf2.predict_proba(X_new)[:, 1]
df = pd.DataFrame(index=frame2012.index)
df['Obama'] = obama_probs
return df, clf2
from sklearn.grid_search import GridSearchCV
def cv_optimize(frame2008, featureslist, n_folds=10, num_p=100):
y, X = prepare_features(frame2008, featureslist)
clf = LogisticRegression()
parameters = {"C": np.logspace(-4, 3, num=num_p)}
gs = GridSearchCV(clf, param_grid=parameters, cv=n_folds)
gs.fit(X, y)
return gs.best_params_, gs.best_score_
def cv_and_fit(frame2008, frame2012, featureslist, n_folds=5):
bp, bs = cv_optimize(frame2008, featureslist, n_folds=n_folds)
predict, clf = fit_logistic(frame2008, frame2012, featureslist, reg=bp['C'])
return predict, clf
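# Putting the pieces together: cv_and_fit grid-searches the regularization
# strength C with n_folds-fold cross-validation on the 2008 data, refits on all
# of 2008 using the best C, and returns the per-state 2012 win probabilities
# together with the fitted classifier.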
e2008.head()
e2012.head()
res, clf = fit_logistic(e2008, e2012, ['Dem_Adv', 'pvi'], reg=0.0001)
res.head()
#your code here
res, clf = cv_and_fit(e2008, e2012, ['Dem_Adv', 'pvi'])
predict2012_logistic = res.join(electoral_votes)
predict2012_logistic.head()
#code to make the histogram
#your code here
prediction = simulate_election(predict2012_logistic, 10000)
plot_simulation(prediction)
#code to make the map
#your code here
make_map(predict2012_logistic.Obama, "P(Obama): Logistic")
from matplotlib.colors import ListedColormap
def points_plot(e2008, e2012, clf):
e2008: The e2008 data
e2012: The e2012 data
clf: classifier
Xtrain = e2008[['Dem_Adv', 'pvi']].values
Xtest = e2012[['Dem_Adv', 'pvi']].values
ytrain = e2008['obama_win'].values == 1
X=np.concatenate((Xtrain, Xtest))
# evenly sampled points
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 50),
np.linspace(y_min, y_max, 50))
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
#plot background colors
ax = plt.gca()
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
cs = ax.contourf(xx, yy, Z, cmap='RdBu', alpha=.5)
cs2 = ax.contour(xx, yy, Z, cmap='RdBu', alpha=.5)
plt.clabel(cs2, fmt = '%2.1f', colors = 'k', fontsize=14)
# Plot the 2008 points
ax.plot(Xtrain[ytrain == 0, 0], Xtrain[ytrain == 0, 1], 'ro', label='2008 McCain')
ax.plot(Xtrain[ytrain == 1, 0], Xtrain[ytrain == 1, 1], 'bo', label='2008 Obama')
# and the 2012 points
ax.scatter(Xtest[:, 0], Xtest[:, 1], c='k', marker="s", s=50, facecolors="k", alpha=.5, label='2012')
plt.legend(loc='upper left', scatterpoints=1, numpoints=1)
return ax
#your code here
points_plot(e2008, e2012, clf)
plt.xlabel("Dem_Adv (from mean)")
plt.ylabel("PVI")
multipoll = pd.read_csv('data/cleaned-state_data2012.csv', index_col=0)
#convert state abbreviation to full name
multipoll.State.replace(states_abbrev, inplace=True)
#convert dates from strings to date objects, and compute midpoint
multipoll.start_date = pd.to_datetime(multipoll.start_date)
multipoll.end_date = pd.to_datetime(multipoll.end_date)
multipoll['poll_date'] = multipoll.start_date + (multipoll.end_date - multipoll.start_date).values / 2
#compute the poll age relative to Oct 2, in days
multipoll['age_days'] = (today - multipoll['poll_date']).values / np.timedelta64(1, 'D')
#drop any rows with data from after oct 2
multipoll = multipoll[multipoll.age_days > 0]
#drop unneeded columns
multipoll = multipoll.drop(['Date', 'start_date', 'end_date', 'Spread'], axis=1)
#add electoral vote counts
multipoll = multipoll.join(electoral_votes, on='State')
#drop rows with missing data
multipoll.dropna()
multipoll.head()
Function
--------
state_average
Inputs
------
multipoll : DataFrame
The multipoll data above
Returns
-------
averages : DataFrame
A dataframe, indexed by State, with the following columns:
N: Number of polls averaged together
poll_mean: The average value for obama_spread for all polls in this state
poll_std: The standard deviation of obama_spread
Notes
-----
For states where poll_std isn't finite (because N is too small), estimate the
poll_std value as .05 * poll_mean
#your code here
def state_average(multipoll):
groups = multipoll.groupby('State')
n = groups.size()
mean = groups.obama_spread.mean()
std = groups.obama_spread.std()
std[std.isnull()] = .05 * mean[std.isnull()]
return pd.DataFrame(dict(N=n, poll_mean=mean, poll_std=std))
avg = state_average(multipoll).join(electoral_votes, how='outer')
avg.head()
def default_missing(results):
red_states = ["Alabama", "Alaska", "Arkansas", "Idaho", "Wyoming"]
blue_states = ["Delaware", "District of Columbia", "Hawaii"]
results.ix[red_states, ["poll_mean"]] = -100.0
results.ix[red_states, ["poll_std"]] = 0.1
results.ix[blue_states, ["poll_mean"]] = 100.0
results.ix[blue_states, ["poll_std"]] = 0.1
default_missing(avg)
avg.head()
Function
--------
aggregated_poll_model
Inputs
------
polls : DataFrame
DataFrame indexed by State, with the following columns:
poll_mean
poll_std
Votes
Returns
-------
A DataFrame indexed by State, with the following columns:
Votes: Electoral votes for that state
Obama: Estimated probability that Obama wins the state
#your code here
def aggregated_poll_model(polls):
sigma = polls.poll_std
prob = .5 * (1 + erf(polls.poll_mean / np.sqrt(2 * sigma ** 2)))
return pd.DataFrame(dict(Obama=prob, Votes=polls.Votes))
#your code here
model = aggregated_poll_model(avg)
sims = simulate_election(model, 10000)
plot_simulation(sims)
plt.xlim(250, 400)
#your code here
make_map(model.Obama, "P(Obama): Poll Aggregation")
multipoll[multipoll.State=="Kansas"]
Function
--------
weighted_state_average
Inputs
------
multipoll : DataFrame
The multipoll data above
Returns
-------
averages : DataFrame
A dataframe, indexed by State, with the following columns:
N: Number of polls averaged together
poll_mean: The average value for obama_spread for all polls in this state
poll_std: The standard deviation of obama_spread
Notes
-----
For states where poll_std isn't finite (because N is too small), estimate the
poll_std value as .05 * poll_mean
#your code here
def weights(df):
lam_age = .5 ** (df.age_days / 30.)
w = lam_age / df.MoE ** 2
return w
def wmean(df):
w = weights(df)
result = (df.obama_spread * w).sum() / w.sum()
return result
def wsig(df):
return df.obama_spread.std()
def weighted_state_average(multipoll):
groups = multipoll.groupby('State')
poll_mean = groups.apply(wmean)
poll_std = groups.apply(wsig)
poll_std[poll_std.isnull()] = poll_mean[poll_std.isnull()] * .05
return pd.DataFrame(dict(poll_mean = poll_mean, poll_std = poll_std))
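# Weighting scheme used above: each poll gets weight lam_age / MoE**2, where
# lam_age = 0.5 ** (age_days / 30) halves a poll's influence every 30 days and
# the 1 / MoE**2 factor favors polls with a smaller margin of error.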
#your code here
average = weighted_state_average(multipoll)
average = average.join(electoral_votes, how='outer')
default_missing(average)
model = aggregated_poll_model(average)
sims = simulate_election(model, 10000)
plot_simulation(sims)
plt.xlim(250, 400)
#your map code here
make_map(model.Obama, "P(Obama): Weighted Polls")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Homework 2
Step4: Here is some code to plot State Choropleth maps in matplotlib. make_map is the function you will use.
Step5: Today
Step6: Background
Step7: To illustrate the use of make_map we plot the Electoral College
Step8: Question 1
Step9: 1.1 Each row is the probability predicted by Predictwise that Romney or Obama would win a state. The votes column lists the number of electoral college votes in that state. Use make_map to plot a map of the probability that Obama wins each state, according to this prediction.
Step11: Later on in this homework we will explore some approaches to estimating probabilities like these and quantifying our uncertainty about them. But for the time being, we will focus on how to make a prediction assuming these probabilities are known.
Step12: The following cells take the necessary DataFrame for the Predictwise data, and run 10000 simulations. We use the results to compute the probability, according to this predictive model, that Obama wins the election (i.e., the probability that he receives 269 or more electoral college votes)
Step14: 1.3 Now, write a function called plot_simulation to visualize the simulation. This function should
Step15: Let's plot the result of the Predictwise simulation. Your plot should look something like this
Step16: Evaluating and Validating our Forecast
Step18: Each row lists a state, the percent of surveyed individuals who identify as Democrat/Republican, the percent whose identification is unknown or who haven't made an affiliation yet, the margin between Democrats and Republicans (Dem_Adv
Step19: Now, we run the simulation with this model, and plot it.
Step21: 1.7 Attempt to validate the predictive model using the above simulation histogram. Does the evidence contradict the predictive model?
Step22: We construct the model by estimating the probabilities
Step23: Once again, we plot a map of these probabilities, run the simulation, and display the results
Step25: 1.9 Attempt to validate the above model using the histogram. Does the predictive distribution appear to be consistent with the real data? Comment on the accuracy and precision of the prediction.
Step26: 1.11 Simulate elections assuming a bias of 1% and 5%, and plot histograms for each one.
Step27: Note that even a small bias can have a dramatic effect on the predictions. Pundits made a big fuss about bias during the last election, and for good reason -- it's an important effect, and the models are clearly sensitive to it. Forecasters like Nate Silver would have had an easier time convincing a wide audience about their methodology if bias wasn't an issue.
Step28: 1.12 Make a scatter plot using the prediction_08 dataframe of the democratic advantage in the 2008 Gallup poll (X axis) compared to the democratic win percentage -- the difference between Obama and McCain's vote percentage -- in the election (Y Axis). Overplot a linear fit to these data.
Step29: Notice that a lot of states in which Gallup reported a Democratic affiliation, the results were strongly in the opposite direction. Why might that be? You can read more about the reasons for this here.
Step30: We compute the average difference between the Democrat advantages in the election and Gallup poll
Step31: Answer The bias was roughly 8% in favor of the Democrats in the Gallup Poll, meaning that you would want to adjust predictions based on this poll down by that amount. This was the result of people in a number of Southern and Western states claiming to be affiliated as Democrats, then voting the other way. Or, since Gallup kept polling even after the elections, it could also represent people swept away by the 2008 election euphoria in those states. This is an illustration of why one needs to be careful with polls.
Step32: Answer
Step33: Let us also load in data about the 2004 elections from p04.csv which gets the results in the above form for the 2004 election for each state.
Step34: 2.1 Build a new DataFrame called e2008. The dataframe e2008 must have the following columns
Step35: We construct a similar frame for 2012, obtaining pvi using the 2008 Obama win data which we already have. There is no obama_win column since, well, our job is to predict it!
Step36: We load in the actual 2012 results so that we can compare our results to the predictions.
Step37: Exploratory Data Analysis
Step38: Answer
Step39: Answer
Step40: We use the above function to get the label vector and feature-sample matrix for feeding to scikit-learn. We then use the usual scikit-learn incantation fit to fit a logistic regression model with regularization parameter C. The parameter C is a hyperparameter of the model, and is used to penalize too high values of the parameter co-efficients in the loss function that is minimized to perform the logistic regression. We build a new dataframe with the usual Obama column, that holds the probabilities used to make the prediction. Finally we return a tuple of the dataframe and the classifier instance, in that order.
Step41: We are not done yet. In order to estimate C, we perform a grid search over many C to find the best C that minimizes the loss function. For each point on that grid, we carry out a n_folds-fold cross-validation. What does this mean?
Step42: Finally we write the function that we use to make our fits. It takes both the 2008 and 2012 frame as arguments, as well as the featurelist, and the number of cross-validation folds to do. It uses the above defined logistic_score to find the best-fit C, and then uses this value to return the tuple of result dataframe and classifier described above. This is the function you will be using.
Step43: 2.4 *Carry out a logistic fit using the cv_and_fit function developed above. As your featurelist use the features we have
Step44: 2.5 As before, plot a histogram and map of the simulation results, and interpret the results in terms of accuracy and precision.
Step46: Answer
Step47: 2.6 Plot your results on the classification space boundary plot. How sharp is the classification boundary, and how does this translate into accuracy and precision of the results?
Step48: Answer
Step50: 3.1 Using this data, compute a new data frame that averages the obama_spread for each state. Also compute the standard deviation of the obama_spread in each state, and the number of polls for each state.
Step51: Let's call the function on the multipoll data frame, and join it with the electoral_votes frame.
Step52: Some of the reddest and bluest states are not present in this data (people don't bother polling there as much). The default_missing function gives them strong Democratic/Republican advantages
Step54: Unweighted aggregation
Step55: 3.3 Run 10,000 simulations with this model, and plot the results. Describe the results in a paragraph -- compare the methodology and the simulation outcome to the Gallup poll. Also plot the usual map of the probabilities
Step56: Answer
Step57: Weighted Aggregation
Step59: There are only two polls in the last year! And, the results in the two polls are far, very far from the mean.
Step60: 3.5 Put this all together -- compute a new estimate of poll_mean and poll_std for each state, apply the default_missing function to handle missing rows, build a forecast with aggregated_poll_model, run 10,000 simulations, and plot the results, both as a histogram and as a map.
|
7,285
|
<ASSISTANT_TASK:>
Python Code:
import itertools as it
def lexicographicPermutations():
l=list(range(10))
r=[''.join(map(str,x)) for x in list(it.permutations(l))]
#print(len(r))
print("Millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9: "+r[999999])
lexicographicPermutations()
def fibonacciSequence():
old=1
current=1
new=0
i=2
while True:
i+=1
new=old+current
if checkLength(new):
break
old=current
current=new
return i
def checkLength(n):
if len(str(n))==1000:
return True
else:
return False
print("Index of the first term in the Fibonacci sequence to contain 1000 digits: "+str(fibonacciSequence()))
def cycleLength(n):
res = 10
j = 0
while res != 10 or j < 1:
res = (res % n) * 10
j += 1
return j
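# Illustrative sanity check: 1/7 = 0.(142857) has a 6-digit repeating cycle,
# which is the multiplicative order of 10 modulo 7 that cycleLength computes.
assert cycleLength(7) == 6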
def Euler26():
long = 0
for i in range(2, 1000):
if i%2 != 0 and i%5 != 0:
length = cycleLength(i)
if length > long:
long = length
res = i
return res
print(Euler26())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 25
Step2: Problem 26
|
7,286
|
<ASSISTANT_TASK:>
Python Code:
class Soldier:
Class that represents a soldier
def __init__(self, name):
self.name = name
def get_name(self):
Returns the soldier's name
return self.name
def __eq__(self,another):
return self.name == another.name
alicia = Soldier("Alicia Melchiott")
alicia2 = Soldier("Alicia Melchiott")
welkin = Soldier("Welkin Gunther")
assert(alicia.get_name() == "Alicia Melchiott")
assert(welkin.get_name() == "Welkin Gunther")
assert(alicia == alicia)
assert(alicia == alicia2)
assert(alicia != welkin)
class Squad:
Class that represents a squad of soldiers
def __init__(self, leader):
self.leader = leader
self.squad = set()
def add_soldier(self,soldier):
Adds a soldier to the squad
if not isinstance(soldier,Soldier):
raise TypeError("El escuadron solo puede agregar soldados")
self.squad.add(soldier)
def kick_soldier(self):
Removes a soldier from the squad
pass
def get_soldiers(self):
Returns all the soldiers in the squad, in formation (sorted)
# soldiers = [ soldier for soldier in self.squad ]
# soldiers.sort()
# return soldiers
return sorted(self.squad)
def get_soldiers_amount(self):
return len(self.squad)
escuadron = Squad(welkin)
escuadron.add_soldier(alicia)
escuadron.add_soldier(alicia2)
assert(escuadron.get_soldiers_amount()==1)
class Soldier:
Class that represents a soldier
def __init__(self, name):
self.name = name
def get_name(self):
Returns the soldier's name
return self.name
def __eq__(self,another):
return self.name == another.name
def __hash__(self):
return hash(self.name)
alicia = Soldier("Alicia Melchiott")
alicia2 = Soldier("Alicia Melchiott")
welkin = Soldier("Welkin Gunther")
escuadron = Squad(welkin)
escuadron.add_soldier(alicia)
escuadron.add_soldier(alicia2)
assert(escuadron.get_soldiers_amount()==1)
class Gun:
Class that represents a generic weapon
SNIPER = "Sniper"
MORTAR = "Mortar"
def __init__(self,name,damage,gun_type):
self.name = name
self.damage = damage
self.gun_type = gun_type
def attack(self,soldier):
soldier.receive_damage(self.damage,self.gun_type)
import random
class Soldier:
Class that represents a soldier
def __init__(self, name,gun):
self.name = name
self.gun = gun
self.hp = random.randint(600,1200)
self.shield = random.randint(1,4)
def get_name(self):
Returns the soldier's name
return self.name
def attack(self,enemy):
self.gun.attack(enemy)
def receive_damage(self,damage,gun_type):
if gun_type == Gun.SNIPER:
self.hp -= damage / self.shield
return
self.hp -= damage
def __eq__(self,another):
return self.name == another.name
def __hash__(self):
return hash(self.name)
sniper_rifle = Gun("Mk-3",200,Gun.SNIPER)
mortar_launcher = Gun("Elias",700,Gun.MORTAR)
alicia = Soldier("Alicia Melchiott",sniper_rifle)
welkin = Soldier("Welkin Gunther",mortar_launcher)
alicia.attack(welkin)
class Calculadora:
def __init__(self):
self.resultado_anterior = 0
def sumar(self,numero_1,numero_2=None):
if numero_2 is None:
numero_2 = self.resultado_anterior
self.resultado_anterior = numero_1 + numero_2
return self.resultado_anterior
def restar(self,numero_1,numero_2=None):
self.resultado_anterior = - self.sumar( - numero_1,numero_2)
return self.resultado_anterior
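# restar() is built on sumar() via the identity a - b == -((-a) + b). Note that
# when numero_2 is omitted it ends up computing numero_1 minus the previous
# result (not the other way around), which may or may not be the intended behavior.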
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Object classes (Oct 19 and 22)
Step3: Tests
Step9: Squad class
Step10: Tests
Step13: Class note
Step18: Gun class
Step19: Tests
Step22: Omake (the homework you didn't do...)
|
7,287
|
<ASSISTANT_TASK:>
Python Code:
import gym
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import interactive
interactive(True)
env = gym.make('trading-v0')
#env.time_cost_bps = 0 #
observation = env.reset()
done = False
navs = []
while not done:
action = 1 # stay flat
observation, reward, done, info = env.step(action)
navs.append(info['nav'])
if done:
print 'Annualized return: ',navs[len(navs)-1]-1
pd.DataFrame(navs).plot()
import trading_env as te
stayflat = lambda o,e: 1 # stand pat
buyandhold = lambda o,e: 2 # buy on day #1 and hold
randomtrader = lambda o,e: e.action_space.sample() # retail trader
# to run singly, we call run_strat. we are returned a dataframe containing
# all steps in the sim.
bhdf = env.run_strat(buyandhold)
print bhdf.head()
# we can easily plot our nav in time:
bhdf.bod_nav.plot(title='buy & hold nav')
env.run_strat(buyandhold).bod_nav.plot(title='same strat, different results')
env.run_strat(buyandhold).bod_nav.plot()
env.run_strat(buyandhold).bod_nav.plot()
# running a strategy multiple times should yield insights
# into its expected behavior or give it oppty to learn
bhdf = env.run_strats(buyandhold,100)
rdf = env.run_strats(randomtrader,100)
comparo = pd.DataFrame({'buyhold':bhdf.mean(),
'random': rdf.mean()})
comparo
import tensorflow as tf
import policy_gradient
# create the tf session
sess = tf.InteractiveSession()
# create policygradient
pg = policy_gradient.PolicyGradient(sess, obs_dim=5, num_actions=3, learning_rate=1e-2 )
# and now let's train it and evaluate its progress. NB: this could take some time...
df,sf = pg.train_model( env,episodes=25001, log_freq=100)#, load_model=True)
sf['net'] = sf.simror - sf.mktror
#sf.net.plot()
sf.net.expanding().mean().plot()
sf.net.rolling(100).mean().plot()
sf.net.rolling(100).mean().tail()
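# 'simror' and 'mktror' presumably hold the strategy's and the market's rate of
# return per episode, so 'net' is the learned policy's edge over buy-and-hold;
# the expanding/rolling means above track whether that edge improves with training.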
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: create the environment
Step2: the trading model
Step3: Note that you are charged just for playing - to the tune of 1 basis point per day!
Step4: running the same strategy multiple times will likely yield different results as underlying data changes
Step5: comparing the buyandhold and random traders
Step6: Object of the game
Step7: Results
|
7,288
|
<ASSISTANT_TASK:>
Python Code:
import cPickle
import os
import re
import shutil
import tarfile
import tensorflow as tf
print(tf.__version__)
CIFAR_FILENAME = 'cifar-10-python.tar.gz'
CIFAR_DOWNLOAD_URL = 'http://www.cs.toronto.edu/~kriz/' + CIFAR_FILENAME
CIFAR_LOCAL_FOLDER = 'cifar-10-batches-py'
def _download_and_extract(data_dir):
tf.contrib.learn.datasets.base.maybe_download(CIFAR_FILENAME, data_dir, CIFAR_DOWNLOAD_URL)
tarfile.open(os.path.join(data_dir, CIFAR_FILENAME), 'r:gz').extractall(data_dir)
def _get_file_names():
Returns the file names expected to exist in the input_dir.
file_names = {}
file_names['train'] = ['data_batch_%d' % i for i in xrange(1, 5)]
file_names['validation'] = ['data_batch_5']
file_names['eval'] = ['test_batch']
return file_names
def _read_pickle_from_file(filename):
with tf.gfile.Open(filename, 'r') as f:
data_dict = cPickle.load(f)
return data_dict
def _convert_to_tfrecord(input_files, output_file):
Converts a file to TFRecords.
print('Generating %s' % output_file)
with tf.python_io.TFRecordWriter(output_file) as record_writer:
for input_file in input_files:
data_dict = _read_pickle_from_file(input_file)
data = data_dict['data']
labels = data_dict['labels']
num_entries_in_batch = len(labels)
for i in range(num_entries_in_batch):
example = tf.train.Example(features=tf.train.Features(
feature={
'image': _bytes_feature(data[i].tobytes()),
'label': _int64_feature(labels[i])
}))
record_writer.write(example.SerializeToString())
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
def create_tfrecords_files(data_dir='cifar-10'):
_download_and_extract(data_dir)
file_names = _get_file_names()
input_dir = os.path.join(data_dir, CIFAR_LOCAL_FOLDER)
for mode, files in file_names.items():
input_files = [os.path.join(input_dir, f) for f in files]
output_file = os.path.join(data_dir, mode+'.tfrecords')
try:
os.remove(output_file)
except OSError:
pass
# Convert to tf.train.Example and write to TFRecords.
_convert_to_tfrecord(input_files, output_file)
create_tfrecords_files()
class FLAGS():
pass
FLAGS.batch_size = 200
FLAGS.max_steps = 1000
FLAGS.eval_steps = 100
FLAGS.save_checkpoints_steps = 100
FLAGS.tf_random_seed = 19851211
FLAGS.model_name = 'cnn-model-02'
FLAGS.use_checkpoint = False
IMAGE_HEIGHT = 32
IMAGE_WIDTH = 32
IMAGE_DEPTH = 3
NUM_CLASSES = 10
def parse_record(serialized_example):
features = tf.parse_single_example(
serialized_example,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
})
image = tf.decode_raw(features['image'], tf.uint8)
image.set_shape([IMAGE_DEPTH * IMAGE_HEIGHT * IMAGE_WIDTH])
image = tf.reshape(image, [IMAGE_DEPTH, IMAGE_HEIGHT, IMAGE_WIDTH])
image = tf.cast(tf.transpose(image, [1, 2, 0]), tf.float32)
label = tf.cast(features['label'], tf.int32)
label = tf.one_hot(label, NUM_CLASSES)
return image, label
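# Layout note: CIFAR-10 pickle batches store each image as a flat
# depth*height*width vector in CHW order, hence the reshape to
# [depth, height, width] followed by the transpose to the [height, width, depth]
# layout that tf.image and tf.layers ops expect.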
def preprocess_image(image, is_training=False):
Preprocess a single image of layout [height, width, depth].
if is_training:
# Resize the image to add four extra pixels on each side.
image = tf.image.resize_image_with_crop_or_pad(
image, IMAGE_HEIGHT + 8, IMAGE_WIDTH + 8)
# Randomly crop a [_HEIGHT, _WIDTH] section of the image.
image = tf.random_crop(image, [IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH])
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
# Subtract off the mean and divide by the variance of the pixels.
image = tf.image.per_image_standardization(image)
return image
def generate_input_fn(file_names, mode=tf.estimator.ModeKeys.EVAL, batch_size=1):
def _input_fn():
dataset = tf.data.TFRecordDataset(filenames=file_names)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
if is_training:
buffer_size = batch_size * 2 + 1
dataset = dataset.shuffle(buffer_size=buffer_size)
# Transformation
dataset = dataset.map(parse_record)
dataset = dataset.map(
lambda image, label: (preprocess_image(image, is_training), label))
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(2 * batch_size)
images, labels = dataset.make_one_shot_iterator().get_next()
features = {'images': images}
return features, labels
return _input_fn
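# Example usage (illustrative): the returned zero-argument callable is what the
# Estimator API expects, e.g.
# train_input = generate_input_fn(train_data_files, tf.estimator.ModeKeys.TRAIN, 128)
# estimator.train(input_fn=train_input, steps=100)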
def get_feature_columns():
feature_columns = {
'images': tf.feature_column.numeric_column('images', (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH)),
}
return feature_columns
feature_columns = get_feature_columns()
print("Feature Columns: {}".format(feature_columns))
def inference(images):
# 1st Convolutional Layer
conv1 = tf.layers.conv2d(
inputs=images, filters=64, kernel_size=[5, 5], padding='same',
activation=tf.nn.relu, name='conv1')
pool1 = tf.layers.max_pooling2d(
inputs=conv1, pool_size=[3, 3], strides=2, name='pool1')
norm1 = tf.nn.lrn(
pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
# 2nd Convolutional Layer
conv2 = tf.layers.conv2d(
inputs=norm1, filters=64, kernel_size=[5, 5], padding='same',
activation=tf.nn.relu, name='conv2')
norm2 = tf.nn.lrn(
conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
pool2 = tf.layers.max_pooling2d(
inputs=norm2, pool_size=[3, 3], strides=2, name='pool2')
# Flatten Layer
shape = pool2.get_shape()
pool2_ = tf.reshape(pool2, [-1, shape[1]*shape[2]*shape[3]])
# 1st Fully Connected Layer
dense1 = tf.layers.dense(
inputs=pool2_, units=384, activation=tf.nn.relu, name='dense1')
# 2nd Fully Connected Layer
dense2 = tf.layers.dense(
inputs=dense1, units=192, activation=tf.nn.relu, name='dense2')
# 3rd Fully Connected Layer (Logits)
logits = tf.layers.dense(
inputs=dense2, units=NUM_CLASSES, activation=None, name='logits')  # keep logits linear; softmax is applied inside the loss
return logits
def model_fn(features, labels, mode, params):
# Create the input layers from the features
feature_columns = list(get_feature_columns().values())
images = tf.feature_column.input_layer(
features=features, feature_columns=feature_columns)
images = tf.reshape(
images, shape=(-1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH))
# Calculate logits through CNN
logits = inference(images)
if mode in (tf.estimator.ModeKeys.PREDICT, tf.estimator.ModeKeys.EVAL):
predicted_indices = tf.argmax(input=logits, axis=1)
probabilities = tf.nn.softmax(logits, name='softmax_tensor')
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
global_step = tf.train.get_or_create_global_step()
label_indices = tf.argmax(input=labels, axis=1)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=labels, logits=logits)
tf.summary.scalar('cross_entropy', loss)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'classes': predicted_indices,
'probabilities': probabilities
}
export_outputs = {
'predictions': tf.estimator.export.PredictOutput(predictions)
}
return tf.estimator.EstimatorSpec(
mode, predictions=predictions, export_outputs=export_outputs)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(
mode, loss=loss, train_op=train_op)
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(label_indices, predicted_indices)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
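# model_fn is invoked separately for TRAIN, EVAL and PREDICT, so each branch
# above returns an EstimatorSpec carrying only what that mode needs:
# export_outputs for PREDICT, train_op for TRAIN, eval_metric_ops for EVAL.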
def serving_input_fn():
receiver_tensor = {'images': tf.placeholder(
shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH], dtype=tf.float32)}
features = {'images': tf.map_fn(preprocess_image, receiver_tensor['images'])}
return tf.estimator.export.ServingInputReceiver(features, receiver_tensor)
model_dir = 'trained_models/{}'.format(FLAGS.model_name)
train_data_files = ['cifar-10/train.tfrecords']
valid_data_files = ['cifar-10/validation.tfrecords']
test_data_files = ['cifar-10/eval.tfrecords']
run_config = tf.estimator.RunConfig(
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
model_dir=model_dir
)
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
# There is another Exporter named FinalExporter
exporter = tf.estimator.LatestExporter(
name='Servo',
serving_input_receiver_fn=serving_input_fn,
assets_extra=None,
as_text=False,
exports_to_keep=5)
train_spec = tf.estimator.TrainSpec(
input_fn=generate_input_fn(file_names=train_data_files,
mode=tf.estimator.ModeKeys.TRAIN,
batch_size=FLAGS.batch_size),
max_steps=FLAGS.max_steps)
eval_spec = tf.estimator.EvalSpec(
input_fn=generate_input_fn(file_names=valid_data_files,
mode=tf.estimator.ModeKeys.EVAL,
batch_size=FLAGS.batch_size),
steps=FLAGS.eval_steps, exporters=exporter)
if not FLAGS.use_checkpoint:
print("Removing previous artifacts...")
shutil.rmtree(model_dir, ignore_errors=True)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
test_input_fn = generate_input_fn(file_names=test_data_files,
mode=tf.estimator.ModeKeys.EVAL,
batch_size=1000)
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
print(estimator.evaluate(input_fn=test_input_fn, steps=1))
export_dir = model_dir + '/export/Servo/'
saved_model_dir = os.path.join(export_dir, os.listdir(export_dir)[-1])
predictor_fn = tf.contrib.predictor.from_saved_model(
export_dir = saved_model_dir,
signature_def_key='predictions')
import numpy
data_dict = _read_pickle_from_file('cifar-10/cifar-10-batches-py/test_batch')
N = 1000
images = data_dict['data'][:N].reshape([N, 3, 32, 32]).transpose([0, 2, 3, 1])
labels = data_dict['labels'][:N]
output = predictor_fn({'images': images})
accuracy = numpy.sum(
[ans==ret for ans, ret in zip(labels, output['classes'])]) / float(N)
print(accuracy)
import os
PROJECT = 'YOUR-PROJECT-ID' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'YOUR-BUCKET-NAME' # REPLACE WITH YOUR BUCKET NAME
REGION = 'BUCKET-REGION' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
PROJECT_ID=$PROJECT
AUTH_TOKEN=$(gcloud auth print-access-token)
SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \
-H "Authorization: Bearer $AUTH_TOKEN" \
https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \
| python -c "import json; import sys; response = json.load(sys.stdin); \
print response['serviceAccount']")
echo "Authorizing the Cloud ML Service account $SVC_ACCOUNT to access files in $BUCKET"
gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET
gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored
gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET
%%bash
echo ${BUCKET}
gsutil -m rm -rf gs://${BUCKET}/cifar-10
gsutil -m cp cifar-10/*.tfrecords gs://${BUCKET}/cifar-10
%%bash
OUTDIR=gs://$BUCKET/trained_models_3cpu
JOBNAME=sm_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=cnn-model-02.task \
--package-path="$(pwd)/trainer/cnn-model-02" \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--config=config_3cpu.yaml \
--runtime-version=1.4 \
-- \
--bucket_name=$BUCKET \
--train_data_pattern=cifar-10/train*.tfrecords \
--eval_data_pattern=cifar-10/eval*.tfrecords \
--output_dir=$OUTDIR \
--max_steps=10000
%%bash
OUTDIR=gs://$BUCKET/trained_models_3gpu
JOBNAME=sm_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=cnn-model-02.task \
--package-path="$(pwd)/trainer/cnn-model-02" \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--config=config_3gpu.yaml \
--runtime-version=1.4 \
-- \
--bucket_name=$BUCKET \
--train_data_pattern=cifar-10/train*.tfrecords \
--eval_data_pattern=cifar-10/eval*.tfrecords \
--output_dir=$OUTDIR \
--max_steps=10000
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: This notebook describes how to implement distributed tensorflow code.
Step3: 2. Define parameters
Step5: 3. Define data input pipeline
Step6: 4. Define features
Step7: 5. Define a model
Step8: 6. Define a serving function
Step9: 7. Train, evaluate and export a model
Step10: 8. Evaluation with Estimator
Step11: 9. Prediction with Exported Model
Step12: 10. Distributed Training with Cloud ML Engine
Step13: b. Set permission to BUCKET (NOTE
Step14: c. Copy TFRecords files to GCS BUCKET
Step15: d. Run distributed training with Cloud MLE
|
7,289
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
#Description from Prestashop
df_description = pd.read_csv('sql_prestashop/ps_product_lang.csv', index_col=False)
#wp_posts from Woocommerce
wp_posts = pd.read_csv('sql_prestashop/wp_posts.csv', index_col=False)
#Use only English "Description" & "Short description" from Prestashop.
df_description = df_description[df_description['id_lang'] == 1]
eng_des = wp_posts.sort_values('ID')
eng_des = eng_des[eng_des['post_type'] == 'product']
#Change column name for merging dataframe.
eng_des = eng_des.rename(columns = {'post_title':'name'})
#merge.
eng_des = pd.merge(eng_des, df_description[['name', 'description', 'description_short']], how='left', on='name')
eng_des['post_excerpt'] = eng_des['description_short']
eng_des['post_content'] = eng_des['description']
#Drop unused column.
eng_des = eng_des.drop(['description', 'description_short'], axis=1)
#Check for duplicate entry.
count = eng_des['name'].value_counts()
#Drop duplicate name of product.
eng_des = eng_des.drop_duplicates(subset='name')
#Create series for Thai ID.
th_id = eng_des['ID']
#Changing "name" column back to "post_title". Protect the error when concatenating
#(After concatenate, it will have 24 column).
eng_des = eng_des.rename(columns = {'name':'post_title'})
empty = np.empty(eng_des.shape[0])
description = pd.Series(empty)
description[0] = ''
for i in range(0, eng_des.shape[0]):
string = eng_des['post_content'].iloc[i]
if pd.isnull(string) == False:
string = string.replace("เรื่องย่อ", "Sypnosis")
string = string.replace("ส่วนที่อยากบอก", "Artist said")
string = string.replace("ส่วนที่ผู้จัดทำอยากบอก", "Artist said")
string = string.replace("ช่องทางการติดต่อ", "Contact")
string = string.replace("ผู้จัดทำ", "Artist")
string = string.replace("จากเรื่อง", "Parody")
string = string.replace("อ้างอิงจากเรื่อง", "Parody")
string = string.replace("ลักษณะของสินค้า", "Details of the product")
string = string.replace("ออกขายครั้งแรก", "Publication date")
string = string.replace("ชื่อสินค้า", "Product")
string = string.replace("ชื่อผลงาน", "Product")
description[i] = string
eng_des['post_content'] = description
#Find max "ID" of wp_posts. Use max ID + 1 to be the starting ID of English product.
max_id = wp_posts['ID'].max()
#Create Series of number as a new "ID".
eng_start = max_id + 1
eng_end = eng_start + eng_des.shape[0]
number = pd.Series(range(eng_start, eng_end))
#Reset eng_des index
eng_des = eng_des.reset_index()
#Drop old index column.
eng_des = eng_des.drop(['index'], axis=1)
eng_des['ID'] = number
#Create Series od number as a new index.
max_index = wp_posts.index.values.max()
eng_index_start = max_index + 1
eng_index_end = eng_index_start + eng_des.shape[0]
number = pd.Series(range(eng_index_start, eng_index_end))
eng_des['number'] = number
eng_des = eng_des.set_index(number)
#Drop unused column.
eng_des = eng_des.drop(['number'], axis=1)
#Generate wp_posts for images
image = wp_posts[wp_posts['post_type'] == 'attachment']
image = image[image['post_parent'] != 0]
#Check type of "post_mime_type"
count = image['post_mime_type'].value_counts()
#Check how many products.
count = image['post_parent'].value_counts()
new = eng_des['ID'].reset_index()
new = new.drop(['index'], axis=1)
new = new['ID']
old = th_id.reset_index()
old = old.drop(['index'], axis=1)
old = old['ID']
#reset index
image = image.reset_index()
image = image.drop(['index'], axis=1)
image['number'] = image['post_parent']
image = image.set_index('number')
image = image.loc[th_id]
#Check for nan value.
image['post_parent'].isnull().sum()
#Drop rows that have nan English ID.
image = image[np.isfinite(image['post_parent'])]
dic = {}
for i in range(len(old)):
dic[old[i]] = new[i]
image['post_parent'] = [dic[x] for x in image['post_parent']]
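#The list comprehension above remaps every old (Thai) product ID in post_parent
#to its new English product ID via the dic lookup table built from the old/new Series.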
max_id = eng_des['ID'].max()
#Create Series of number as a new "ID".
start = max_id + 1
end = start + image.shape[0]
number = pd.Series(range(start, end))
#Reset eng_des index
image = image.reset_index()
#Drop old index column.
image = image.drop(['number'], axis=1)
image['ID'] = number
#Create Series od number as a new index.
max_index = eng_des.index.values.max()
index_start = max_index + 1
index_end = index_start + image.shape[0]
number = pd.Series(range(index_start, index_end))
image['number'] = number
image = image.set_index(number)
#Drop unused column.
image = image.drop(['number'], axis=1)
wp_posts_with_eng = pd.concat([wp_posts, eng_des, image], axis=0)
wp_posts_with_eng = wp_posts_with_eng.sort_values('ID')
wp_posts_with_eng.to_csv('product_import_to_woo/wp_posts_with_eng.csv', encoding='utf-8', index=False)
wp_postmeta = pd.read_csv('sql_prestashop/wp_postmeta.csv', index_col=False)
meta = pd.read_csv('sql_prestashop/wp_postmeta.csv', index_col=False)
meta['number'] = meta['post_id']
meta = meta.set_index('number')
meta = meta.loc[th_id]
meta['post_id'] = [dic[x] for x in meta['post_id']]
max_id = wp_postmeta['meta_id'].max()
#Create Series of number as a new "ID".
start = max_id + 1
end = start + meta.shape[0]
number = pd.Series(range(start, end))
#Reset eng_des index
meta = meta.reset_index()
#Drop old index column.
meta = meta.drop(['number'], axis=1)
meta['meta_id'] = number
#Set new index
#Create Series od number as a new index.
max_index = wp_postmeta.index.values.max()
index_start = max_index + 1
index_end = index_start + meta.shape[0]
number = pd.Series(range(index_start, index_end))
meta['number'] = number
meta = meta.set_index(number)
#Drop unused column.
meta = meta.drop(['number'], axis=1)
wp_postmeta_with_eng = pd.concat([wp_postmeta, meta], axis=0)
wp_postmeta_with_eng = wp_postmeta_with_eng.sort_values('post_id')
#Change SKU format.
for i in range(wp_postmeta_with_eng.shape[0]):
if wp_postmeta_with_eng['meta_key'].iloc[i] == '_sku':
wp_postmeta_with_eng['meta_value'].iloc[i] = 'A' + str(wp_postmeta_with_eng['meta_value'].iloc[i]).zfill(5)
x = wp_postmeta_with_eng['meta_value'].iloc[i]
wp_postmeta_with_eng.to_csv('product_import_to_woo/wp_postmeta_with_eng.csv', encoding='utf-8', index=False)
translations = pd.read_csv('sql_prestashop/wp_icl_translations.csv', index_col=False)
thai_des = wp_posts[wp_posts['post_type'] == 'product']
thai_des = thai_des[['ID', 'post_title']]
eng_des = eng_des[['ID', 'post_title']]
tr = translations[translations['element_type'] == 'post_product']
#Change column name for merging dataframe.
tr = tr.rename(columns = {'element_id':'ID'})
#Merge thai_des first. The result dataframe will have new "post_title" column.
tr = pd.merge(tr, thai_des, how='left', on='ID')
#Merge English description. Now we use "post_title" as a key.
tr = pd.merge(tr, eng_des, how='left', on='post_title')
#Rename ID_x and ID_y for comfortable use.
tr = tr.rename(columns = {'ID_x':'ID_th', 'ID_y':'ID_en'})
tr = tr[np.isfinite(tr['ID_en'])]
#Check for duplicate entry.
count = tr['ID_en'].value_counts()
#Drop duplicate name of product.
tr = tr.drop_duplicates(subset='ID_en')
#Find max "ID" of wp_icl_translations. Use max ID + 1 to be the starting ID of English description.
max_id = translations['translation_id'].max()
#Create Series of number as a new "ID".
start = max_id + 1
end = start + tr.shape[0]
number = pd.Series(range(start, end))
#Create new dataframe in wp_icl_translations form. Then collect the processed
#English id's wp_icl_translations data.
dataframe = pd.DataFrame({
'translation_id': number,
'element_type' : 'post_product',
'element_id': tr['ID_en'].values,
'trid': tr['trid'].values,
'language_code': 'en',
'source_language_code': 'th'},
columns=['translation_id', 'element_type', 'element_id', 'trid', 'language_code', 'source_language_code'])
#Set new index, so it will continue from the last index in wp_icl_translations.
start = translations.shape[0]
end = start + dataframe.shape[0]
number = pd.Series(range(start,end))
dataframe = dataframe.set_index(number)
wp_icl_translations = pd.concat([translations, dataframe])
wp_icl_translations["source_language_code"] = wp_icl_translations["source_language_code"].fillna('NULL')
wp_icl_translations.to_csv('product_import_to_woo/wp_icl_translations_to_import.csv', encoding='utf-8', index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We load the data. There are
Step2: We select only the English description from Prestashop database.
Step3: Next step, we fill the "Description" & "Short description" in "wp_posts".
Step4: "wp_posts" has many post types. We extract only "product" type.
Step5: Change the column name for merging. Then we merge "eng_des" and "df_description" by product name. This ensures that we match the right description to the right product.
Step6: Fill wp_posts' new "Description" and "Short description" fields. Then drop the unused columns.
Step7: There are some duplicate entries because some entries share the same name.
Step8: Changing "name" column back to "post_title". Protect the error when concatenating (after concatenate, it will have 24 column).
Step9: Change some error strings.
Step10: Then we run a for loop to replace a string.
Step11: Change ID and Index number.
Step12: Create "wp_posts" entry for images.
Step13: Some image entries don't have a parent product. We select only the entries that have a "post_parent" value.
Step14: Let's check by counting "post_mime_type" and "post_parent". "post_mime_type" is the file type, such as ".jpg" or ".png". Using value_counts() shows each file type in the data and how many files fall into each category. Counting "post_parent" shows the number of images attached to each ID.
Step15: Now we have to change "post_parent" to match each image to the right product. We use a Python dictionary to translate the old "post_parent" values to new ones (a minimal sketch of this translation appears after this step list).
Step16: Some post_parent values don't appear in the dictionary. Keep only the ones that exist among the Thai IDs.
Step17: Check for NaN values and drop rows whose English ID is NaN.
Step18: Create a dictionary.
Step19: Translate old to new "ID"
Step20: Set new ID and index number.
Step21: Concatenate to "wp_posts"
Step22: Arrange index.
Step23: Export new raw_product and upload to Woocommerce.
Step24: Creating the "wp_postmeta"
Step25: "meta" still have other posts. We need to extract only product, so we bring "meta" with the same ID as Thai products.
Step26: Translate old to new "ID".
Step27: Set new ID and index number.
Step28: Concatenate with the old wp_posts.
Step29: Arrange index.
Step30: To make sorting products in the backend easier, we change the "SKU" to a new format.
Step31: Export to .csv
Step32: Creating the "wp_icl_translations"
Step33: Merging Thai & English Description to wp_icl_translations to make sure we match the correct ID.
Step34: First, we merge Thai product to "wp_icl_translations".
Step35: Then we merge it with the English product description by "post_title". The reason we use "post_title" as the key is that Thai and English products don't share the same ID.
Step36: Drop rows that English ID has a nan value.
Step37: Use value_counts() to find duplicate values. If there are duplicates, we drop them.
Step38: Create a series with the size of English products.
Step39: Creating a new dataframe in the format of wp_icl_translations.
Step40: Set new index for dataframe.
Step41: Concatenate dataframe with "translations" and name the result wp_icl_translations.
Step42: "source_language_code" must fill with "NULL" in order to make Woocommerce working properly.
Step43: Export to .csv
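A minimal sketch of the ID translation described in Steps 15-19, using a hypothetical dataframe name (images); only the tr columns ID_th and ID_en come from the code above:
# Build a dict from old Thai product IDs to the new English IDs...
id_map = dict(zip(tr['ID_th'], tr['ID_en']))
# ...then translate each image's post_parent and keep only the rows we could map.
images['post_parent'] = images['post_parent'].map(id_map)
images = images.dropna(subset=['post_parent'])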
|
7,290
|
<ASSISTANT_TASK:>
Python Code:
# Used for card shuffle
import random
# Boolean used to know if hand is in play
playing = False
chip_pool = 100 # Could also make this a raw input.
bet = 1
restart_phrase = "Press 'd' to deal the cards again, or press 'q' to quit"
# Hearts, Diamonds,Clubs,Spades
suits = ('H','D','C','S')
# Possible card ranks
ranking = ('A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K')
# Point values dict (Note: Aces can also be 11, check self.ace for details)
card_val = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, 'J':10, 'Q':10, 'K':10}
# Create a card class
class Card:
def __init__(self,suit,rank):
self.suit = suit
self.rank = rank
def __str__(self):
return self.suit + self.rank
def grab_suit(self):
return self.suit
def grab_rank(self):
return self.rank
def draw(self):
print (self.suit + self.rank)
# Create a hand class
class Hand:
def __init__(self):
self.cards = []
self.value = 0
# Aces can be 1 or 11 so need to define it here
self.ace = False
def __str__(self):
''' Return a string of current hand composition'''
hand_comp = ""
# Better way to do this? List comprehension?
for card in self.cards:
card_name = card.__str__()
hand_comp += " " + card_name
return 'The hand has %s' %hand_comp
def card_add(self,card):
''' Add another card to the hand'''
self.cards.append(card)
# Check for Aces
if card.rank == 'A':
self.ace = True
self.value += card_val[card.rank]
def calc_val(self):
'''Calculate the value of the hand, make aces an 11 if they don't bust the hand'''
if (self.ace == True and self.value < 12):
return self.value + 10
else:
return self.value
def draw(self,hidden):
if hidden == True and playing == True:
#Don't show first hidden card
starting_card = 1
else:
starting_card = 0
for x in range(starting_card,len(self.cards)):
self.cards[x].draw()
class Deck:
def __init__(self):
''' Create a deck in order '''
self.deck = []
for suit in suits:
for rank in ranking:
self.deck.append(Card(suit,rank))
def shuffle(self):
''' Shuffle the deck, python actually already has a shuffle method in its random lib '''
random.shuffle(self.deck)
def deal(self):
''' Grab the first item in the deck '''
single_card = self.deck.pop()
return single_card
def __str__(self):
deck_comp = ""
for card in self.deck:
deck_comp += " " + card.__str__()
return "The deck has" + deck_comp
# First Bet
def make_bet():
''' Ask the player for the bet amount and '''
global bet
bet = 0
print ' What amount of chips would you like to bet? (Enter whole integer please) '
# While loop to keep asking for the bet
while bet == 0:
bet_comp = raw_input() # Use bet_comp as a checker
bet_comp = int(bet_comp)
# Check to make sure the bet is within the remaining amount of chips left.
if bet_comp >= 1 and bet_comp <= chip_pool:
bet = bet_comp
else:
print "Invalid bet, you only have " + str(chip_pool) + " remaining"
def deal_cards():
''' This function deals out cards and sets up round '''
# Set up all global variables
global result,playing,deck,player_hand,dealer_hand,chip_pool,bet
# Create a deck
deck = Deck()
#Shuffle it
deck.shuffle()
#Set up bet
make_bet()
# Set up both player and dealer hands
player_hand = Hand()
dealer_hand = Hand()
# Deal out initial cards
player_hand.card_add(deck.deal())
player_hand.card_add(deck.deal())
dealer_hand.card_add(deck.deal())
dealer_hand.card_add(deck.deal())
result = "Hit or Stand? Press either h or s: "
if playing == True:
print 'Fold, Sorry'
chip_pool -= bet
# Set up to know currently playing hand
playing = True
game_step()
def hit():
''' Implement the hit button '''
global playing,chip_pool,deck,player_hand,dealer_hand,result,bet
# If hand is in play add card
if playing:
if player_hand.calc_val() <= 21:
player_hand.card_add(deck.deal())
print "Player hand is %s" %player_hand
if player_hand.calc_val() > 21:
result = 'Busted! '+ restart_phrase
chip_pool -= bet
playing = False
else:
result = "Sorry, can't hit" + restart_phrase
game_step()
def stand():
global playing,chip_pool,deck,player_hand,dealer_hand,result,bet
''' This function will now play the dealers hand, since stand was chosen '''
if playing == False:
if player_hand.calc_val() > 0:
result = "Sorry, you can't stand!"
# Now go through all the other possible options
else:
# Soft 17 rule
while dealer_hand.calc_val() < 17:
dealer_hand.card_add(deck.deal())
# Dealer Busts
if dealer_hand.calc_val() > 21:
result = 'Dealer busts! You win!' + restart_phrase
chip_pool += bet
playing = False
#Player has better hand than dealer
elif dealer_hand.calc_val() < player_hand.calc_val():
result = 'You beat the dealer, you win!' + restart_phrase
chip_pool += bet
playing = False
# Push
elif dealer_hand.calc_val() == player_hand.calc_val():
result = 'Tied up, push!' + restart_phrase
playing = False
# Dealer beats player
else:
result = 'Dealer Wins!' + restart_phrase
chip_pool -= bet
playing = False
game_step()
def game_step():
'Function to print game step/status on output'
#Display Player Hand
print ""
print('Player Hand is: '),
player_hand.draw(hidden =False)
print 'Player hand total is: '+str(player_hand.calc_val())
#Display Dealer Hand
print('Dealer Hand is: '),
dealer_hand.draw(hidden=True)
# If game round is over
if playing == False:
print " --- for a total of " + str(dealer_hand.calc_val() )
print "Chip Total: " + str(chip_pool)
# Otherwise, don't know the second card yet
else:
print " with another card hidden upside down"
# Print result of hit or stand.
print result
player_input()
def game_exit():
print 'Thanks for playing!'
exit()
def player_input():
''' Read user input, lower case it just to be safe'''
plin = raw_input().lower()
if plin == 'h':
hit()
elif plin == 's':
stand()
elif plin == 'd':
deal_cards()
elif plin == 'q':
game_exit()
else:
print "Invalid Input...Enter h, s, d, or q: "
player_input()
def intro():
statement = '''Welcome to BlackJack! Get as close to 21 as you can without going over!
Dealer hits until she reaches 17. Aces count as 1 or 11.
Card output goes a letter followed by a number of face notation'''
print statement
'''The following code will initiate the game! (Note: Need to Run all Cells)'''
# Create a Deck
deck = Deck()
#Shuffle it
deck.shuffle()
# Create player and dealer hands
player_hand = Hand()
dealer_hand = Hand()
#Print the intro
intro()
# Deal out the cards and start the game!
deal_cards()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now I'll make a card class, it will have some basic ID functions, and then some functions to grab the suit and rank of the card.
Step2: Now I'll make a hand class; it will have functions that take the Ace situation into account
Step3: Next I'll make a deck class
Step4: Now that the classes are done, time for the cool part, creating the actual game!
Step5: Next, make a function setting up the game and for dealing out the cards.
Step6: Now make the hit function
Step7: Now make the stand function
Step8: Function to print results and ask user for next step
Step9: Function for exiting the game
Step10: Function to read user input
Step11: Make a quick intro for the game
Step12: Now to play the game!
|
7,291
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from matplotlib import pyplot as plt
import pickle
import os
%pylab inline
clmax=5
spc=500 #samples per class (an int, so it can be used directly as an array size)
theta_range=2
#samples is list of labels
samples=np.zeros(spc*clmax,dtype=np.uint32)
#I is the feature vector
I=np.zeros((spc*clmax,theta_range),dtype=np.float32)
marker=['bo','co','go','ro','mo','yo','ko',
'bs','cs','gs','rs','ms','ys','ks']
# number of datasets being generated
# 8 for training
# another one for evaluation
N=9
path="train/"
if not os.path.exists(path):
os.makedirs(path)
for n in xrange(N):
for cl in xrange(clmax):
xo=cl*spc
#define label
samples[xo:xo+spc]=cl
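#Each class is a noisy spiral arm: the angle sweeps one full turn (offset by
#2*pi*cl/clmax per class) while the radius grows from 0.1 to 1.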
phi = np.linspace(0, 2*np.pi, spc) + \
np.random.randn(spc)*0.4*np.pi/clmax + \
2*np.pi*cl/clmax
r = np.linspace(0.1, 1, spc)
I[xo:xo+spc,:]=np.transpose(np.array([r*np.cos(phi), r*np.sin(phi)]))
with open(path+'dataset%02d.pic'%(n), 'wb') as pickleFile:
#write label and feature vector
theta_dim=1
pickle.dump((clmax,theta_dim,theta_range,len(samples),samples,I,None), pickleFile, pickle.HIGHEST_PROTOCOL)
z=np.random.randint( 0,spc*clmax,1000)
for i in z:
#ax.plot(dset.I[i,0],dset.I[i,1],marker[dset2.samples[i]])
plt.plot(I[i,0],I[i,1],marker[samples[i]])
plt.hold(True)
from pforest.master import master
m=master()
m.reset()
m.train()
with open('out_tree.pic', 'wb') as pickleFile:
pickle.dump(m.root, pickleFile, pickle.HIGHEST_PROTOCOL)
with open('out_tree.pic', 'rb') as pickleFile:
root = pickle.load(pickleFile)
ls
from pforest.dataset import dataset
from pforest.tree import tree
#init the test tree
t=tree()
t.settree(root)
t.show()
#load the last dataset that never use for training
dset=dataset(8)
correct=0;
for x in xrange(dset.size):
L=t.getL(np.array([x]),dset)
if dset.getL(x) == L:
correct=correct+1
dset.setL(x,L)
print("recall rate: {}%".format(correct/float(dset.size)*100))
#setup the new test-set
#load dataset
dset=dataset(8)
d=0.05
y, x = np.mgrid[slice(-1, 1+d, d), slice(-1, 1+d, d)]
#start labeling
L=np.zeros(x.shape,dtype=int)
for r in xrange(x.shape[0]):
for c in xrange(x.shape[1]):
u=( x[r,c],y[r,c] )
Prob=t.classify(u)
L[r,c]=np.argmax(Prob)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.axis([-1,1,-1,1])
ax.pcolor(x,y,L)
ax.hold(True)
z=np.random.randint(0,dset.size,1000)
for i in z:
ax.plot(dset.I[i,0],dset.I[i,1],marker[dset.samples[i]])
fig
t.classify([0.75,0.0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generating datasets
Step2: Visualization of the dataset
Step3: Training
Step4: Write and read the tree
Step5: Check the file size
Step6: The result decision tree
Step7: Recall rate
Step8: Labelling
Step9: 2D space partitioning by the decision tree
Step10: Overlay the dataset
|
7,292
|
<ASSISTANT_TASK:>
Python Code:
# import
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(42)
random_numbers = np.empty(100000)
for i in range(100000):
random_numbers[i] = np.random.random()
plt.hist(random_numbers)
def perform_bernoulli_trials(n, p):
"""Perform n Bernoulli trials with success probability p
and return number of successes."""
n_success=0
for i in range(n):
random_no = np.random.random()
if random_no < p:
n_success += 1
return n_success
# run 1000 experiments of 100 Bernoulli trials with p = 0.5 and look at the distribution of successes
np.random.seed(42)
n_success = np.empty(1000)
for i in range(1000):
n_success[i] = perform_bernoulli_trials(100, 0.5)
plt.hist(n_success, normed=True)
def ecdf(data):
n = len(data)
x = np.sort(data)
y = np.arange(1, n+1)/n
return x,y
np.random.seed(42)
n_succ = np.random.binomial(100, 0.5, size=1000)
print(n_succ[:4])
x, y = ecdf(n_succ)
plt.plot(x, y, marker=".", linestyle='none')
plt.xlabel("X")
plt.ylabel("Y")
plt.title("ECDF plot")
plt.margins(0.05)
plt.show()
plt.hist(n_success, normed=True)
# Draw 10,000 samples out of Poisson distribution: samples_poisson
samples_poisson = np.random.poisson(10, size=10000)
# Print the mean and standard deviation
print('Poisson: ', np.mean(samples_poisson),
np.std(samples_poisson))
# Specify values of n and p to consider for Binomial: n, p
n = [20, 100, 1000]
p = np.array([0.5, 0.1, 0.01])
# Draw 10,000 samples for each n,p pair: samples_binomial
for i in range(3):
samples_binomial = np.random.binomial(n[i], p[i], size=10000)
# Print results
print('n =', n[i], 'Binom:', np.mean(samples_binomial),
np.std(samples_binomial))
# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3, samples_std10
samples_std1 = np.random.normal(20, 1, size=100000)
samples_std3 = np.random.normal(20, 3, size=100000)
samples_std10 = np.random.normal(20, 10, size=100000)
# Make histograms
plt.hist(samples_std1, bins=100, normed=True, histtype='step')
plt.hist(samples_std3, bins=100, normed=True, histtype='step')
plt.hist(samples_std10, bins=100, normed=True, histtype='step')
# Make a legend, set limits and show plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'))
plt.ylim(-0.01, 0.42)
plt.show()
# Generate CDFs
x_std1, y_std1 = ecdf(samples_std1)
x_std3, y_std3 = ecdf(samples_std3)
x_std10, y_std10 = ecdf(samples_std10)
# Plot CDFs
plt.plot(x_std1, y_std1, marker='.', linestyle='none')
plt.plot(x_std3, y_std3, marker='.', linestyle='none')
plt.plot(x_std10, y_std10, marker='.', linestyle='none')
# Make 2% margin
plt.margins(0.02)
# Make a legend and show the plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right')
plt.show()
belmont_no_outliers=np.array([ 148.51, 146.65, 148.52, 150.7 , 150.42, 150.88, 151.57,
147.54, 149.65, 148.74, 147.86, 148.75, 147.5 , 148.26,
149.71, 146.56, 151.19, 147.88, 149.16, 148.82, 148.96,
152.02, 146.82, 149.97, 146.13, 148.1 , 147.2 , 146. ,
146.4 , 148.2 , 149.8 , 147. , 147.2 , 147.8 , 148.2 ,
149. , 149.8 , 148.6 , 146.8 , 149.6 , 149. , 148.2 ,
149.2 , 148. , 150.4 , 148.8 , 147.2 , 148.8 , 149.6 ,
148.4 , 148.4 , 150.2 , 148.8 , 149.2 , 149.2 , 148.4 ,
150.2 , 146.6 , 149.8 , 149. , 150.8 , 148.6 , 150.2 ,
149. , 148.6 , 150.2 , 148.2 , 149.4 , 150.8 , 150.2 ,
152.2 , 148.2 , 149.2 , 151. , 149.6 , 149.6 , 149.4 ,
148.6 , 150. , 150.6 , 149.2 , 152.6 , 152.8 , 149.6 ,
151.6 , 152.8 , 153.2 , 152.4 , 152.2 ])
# Compute mean and standard deviation: mu, sigma
mu, sigma = np.mean(belmont_no_outliers), np.std(belmont_no_outliers)
# Sample out of a normal distribution with this mu and sigma: samples
samples = np.random.normal(mu, sigma, size=10000)
# Get the CDF of the samples and of the data
x_theor, y_theor = ecdf(samples)
x, y = ecdf(belmont_no_outliers)
# Plot the CDFs and show the plot
_ = plt.plot(x_theor, y_theor)
_ = plt.plot(x, y, marker='.', linestyle='none')
plt.margins(0.02)
_ = plt.xlabel('Belmont winning time (sec.)')
_ = plt.ylabel('CDF')
plt.show()
# Take a million samples out of the Normal distribution: samples
samples = np.random.normal(mu, sigma, size=1000000)
# Compute the fraction that are faster than 144 seconds: prob
prob = np.sum(samples<=144)/len(samples)
# Print the result
print('Probability of besting Secretariat:', prob)
def successive_poisson(tau1, tau2, size=1):
# Draw samples out of first exponential distribution: t1
t1 = np.random.exponential(tau1, size=size)
# Draw samples out of first exponential distribution: t2
t2 = np.random.exponential(tau2, size=size)
return t1 + t2
# Draw samples of waiting times: waiting_times
waiting_times = successive_poisson(764, 715, size=100000)
#print(waiting_times)
# Make the histogram
plt.hist(waiting_times, bins=100, histtype='step')
# plt.hist(waiting_times, bins=100, histtype='step', normed=True)
# Set margins and label axes
plt.margins(0.05)
plt.xlabel("total draw")
plt.ylabel("PDF")
# Show the plot
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generating Random numbers
Step3: perform_bernoulli_trials - check the probability of Success
Step4: In baseball, a no-hitter is a game in which a pitcher does not allow the other team to get a hit. This is a rare event, and since the beginning of the so-called modern era of baseball (starting in 1901), there have only been 251 of them through the 2015 season in over 200,000 games. The ECDF of the number of no-hitters in a season is shown to the right. Which probability distribution would be appropriate to describe the number of no-hitters we would expect in a given season?
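A hedged sketch of the answer: counts of rare, independent events in a fixed interval are well modeled by a Poisson distribution. Using the figures quoted above (about 251 no-hitters over the roughly 115 seasons from 1901 through 2015):
# Approximate rate of no-hitters per season, then sample from a Poisson distribution.
rate = 251 / 115
samples_nohitter = np.random.poisson(rate, size=10000)
print(np.mean(samples_nohitter), np.std(samples_nohitter))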
|
7,293
|
<ASSISTANT_TASK:>
Python Code:
import csv
id=[]
with open('../data/'+datafiles['mRNA']) as f:
my_csv = csv.reader(f,delimiter='\t')
id = my_csv.next()
stat={}
with open('../data/TCGA_Data/data_bcr_clinical_data_patient.csv') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
patient_id=row[1]
stat[patient_id]=(row[15],row[16],row[17])
import re
time_list = []
event_list = []
group_list = []
print('The following case IDs were not found in clinical data')
for index, key in enumerate(id[1:]):
m = re.match('TCGA-\d+-\d+', key)
patient_id = m.group(0)
if patient_id in stat:
patient_stat = stat[patient_id]
add_group = True
try:
time_list.append(float(patient_stat[2]))
event_list.append(1)
except ValueError:
try:
time_list.append(float(patient_stat[1]))
event_list.append(0)
except ValueError:
print('No data for %s' % patient_id)
add_group = False
if add_group:
group_list.append(cl[index])
else:
print(patient_id)
from lifelines import KaplanMeierFitter
kmf = KaplanMeierFitter()
kmf.fit(time_list,event_observed=event_list)
kmf.plot()
T=np.array(time_list)
E=np.array(event_list)
ix = (np.array(group_list) == 1)
kmf.fit(T[ix], E[ix], label='group 1')
ax=kmf.plot()
for i in [4]:
ix=(np.array(group_list)==i)
kmf.fit(T[ix], E[ix], label='group %d' % i)
kmf.plot(ax=ax)
T=np.array(time_list)
E=np.array(event_list)
ix = (np.array(group_list) == 0)
kmf.fit(T[ix], E[ix], label='group 0')
ax=kmf.plot()
for i in range(2,4):
ix=(np.array(group_list)==i)
kmf.fit(T[ix], E[ix], label='group %d' % i)
kmf.plot(ax=ax)
with open('/Users/gluca/SoftwareProjects/Thesis/MDBN/MATLAB_classes.txt') as f:
matlab_classes = f.readlines()
ML = []
for s in matlab_classes:
ML.append(float(s))
ML = np.asarray(ML)
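# The two runs label the same clusters with different integers; shifting by 3
# modulo 5 happens to align the MATLAB labels with the Theano ones.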
new_cl = np.mod(ML+3,5)
new_cl
plt.hist(new_cl,bins=5)
np.sum(new_cl == cl)
319./385.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Comparison with MATLAB results
Step2: The class IDs obtained from MATLAB and from Theano are not the same; the formula below happens to convert between them
Step3: Number of samples with the same class
|
7,294
|
<ASSISTANT_TASK:>
Python Code:
import pymrio
from pathlib import Path
oecd_storage = Path('/tmp/mrios/OECD')
meta_2018_download = pymrio.download_oecd(storage_folder=oecd_storage, years=[2011])
oecd_path_year = pymrio.parse_oecd(path=oecd_storage, year=2011)
oecd_file = pymrio.parse_oecd(path=oecd_storage / 'ICIO2018_2011.zip')
oecd_path_year == oecd_file
oecd_file.factor_inputs.F.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: OECD provides the data compressed in zip files. The pymrio oecd parser works with both the compressed and the unpacked version.
Step2: Or directly specify a file to parse
Step3: Note
|
7,295
|
<ASSISTANT_TASK:>
Python Code:
import phidl.geometry as pg
from phidl import Device, Layer, LayerSet
from phidl import quickplot as qp
D = Device()
# Specify layer with a single integer 0-255 (gds datatype will be set to 0)
layer1 = 1
# Specify layer as a (layer, datatype) tuple, equivalent to gds_layer = 2, gds_datatype = 6
layer2 = (2,6)
# Specify layer as a Layer object with gds_layer = 3 and gds_datatype = 5
layer3 = Layer(gds_layer = 3, gds_datatype = 5, color = 'gold')
D << pg.text('Layer (1,0)', layer = layer1)
D << pg.text('Layer (2,6)', layer = layer2).movey(-20)
D << pg.text('Layer (3,5)', layer = layer3).movey(-40)
qp(D)
# Note each element of the set must be a valid layer input by itself
my_layers = {1, (3,5), (3,6), (7,8)}
D = pg.ellipse(layer = my_layers)
ls = LayerSet() # Create a blank LayerSet
ls.add_layer(name = 'au', gds_layer = 4, gds_datatype = 0,
description = 'Gold wiring', color = 'goldenrod')
ls.add_layer(name = 'nb', gds_layer = 5, gds_datatype = 0,
description = 'Niobium liftoff', color = (0.4,0.5,0.7))
ls.add_layer('nb_etch', 6, 0, color = 'lightblue', alpha = 0.2)
ls.add_layer('silicon', 8, 2, color = 'green', alpha = 0.4)
D = Device()
gold_layer = ls['au']
D.add_ref( pg.text('Gold layer', size = 10, layer = ls['au']) ).movey(0)
D.add_ref( pg.text('Niobium layer', size = 10, layer = ls['nb']) ).movey(-20)
D.add_ref( pg.text('Nb Etch layer', size = 10, layer = ls['nb_etch']) ).movey(-40)
D.add_ref( pg.text('Si layer', size = 10, layer = ls['silicon']) ).movey(-60)
qp(D)
T = pg.text('All layers', layer = ls)
qp(T)
print(ls['nb'])
D = pg.preview_layerset(ls)
qp(D)
import phidl.utilities as pu
pu.write_lyp('my_layer_properties_file.lyp', layerset = ls)
def many_ellipses(layer1 = 1, layer2 = 2, layer3 = 3):
D = Device()
D << pg.ellipse(layer = layer1)
D << pg.ellipse(layer = layer2).movex(15)
D << pg.ellipse(layer = layer3).movex(30)
return D
qp(many_ellipses(layer1 = 1, layer2 = 2, layer3 = 3))
qp(many_ellipses(layer1 = 1, layer2 = None, layer3 = 3))
D = pg.preview_layerset(ls)
qp(D)
D.remove_layers(layers = [6,8])
qp(D)
D.remove_layers(layers = [(8,2)])
qp(D)
D.remove_layers(layers = [4], invert_selection = True)
qp(D)
D = pg.preview_layerset(ls)
qp(D)
D.remap_layers(layermap = {(8,2): 99,
6 : 77,
})
qp(D)
D = Device()
D << pg.ellipse(layer = 1)
D << pg.rectangle(size = (10,10), layer = 2).movex(15)
D << pg.arc(width = 10, layer = 3).movex(25)
D << pg.circle(radius = 5, layer = 2).movex(50)
qp(D)
D_only_layers_1_and_2 = pg.extract(D, layers = [1,2])
qp(D_only_layers_1_and_2)
qp(D)
D_copied = pg.copy_layer(D, layer = 3, new_layer = 1)
qp(D_copied)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multiple layers
Step2: Note that although we specified four different layers, it did not produce four separate ellipse Devices--instead, it produced one Device with all four ellipse polygons inside that single Device.
Step3: Now that our layers are defined, we can call them from the LayerSet in the same way we would from a dictionary, where the name becomes the key
Step4: We can additionally use a LayerSet to add the same structure to several layers at once by passing the whole layerset to the layer argument. Note that since they all overlap, it will look like a single object (but rest assured there are multiple layers)
Step5: If we want to examine any single layer, we can call them by their names,
Step6: We can quickly preview our color scheme using the pg.preview_layerset() function as well.
Step7: Making KLayout .lyp files
Step8: Ignoring layers with None
Step9: Now say we want to remove the middle layer entirely. We note that it's defined by the layer layer2, so we can set that to None and it won't be added at all
Step10: Removing layers
Step11: Now let's remove layers 6 and 8. This should leave us with just the gold (layer 4,0) and nb (5,0) layers.
Step12: Whoops! Note that because we specified just an 8 as the layer we wanted to remove, PHIDL interpreted it as (8,0), so it didn't remove the silicon layer that is actually on (8,2). Let's try again
Step13: That's better.
Step14: Remapping (moving) layers
Step15: Now say that we wanted to move shapes on the silicon layer (8,2) to layer 99, and nb_etch layer (6,0) to layer 77 but leave the other layers alone. We do this by construction a python dictionary and passing to remap_layers()
Step16: Very good! We can see from the colors of the bottom polygons changing that the layer remapping worked.
Step17: Next, let's use pg.extract() to create a new Device which only has the objects on layers 1 and 2
Step18: Note that the Device created from this function is flattened
Step19: ...and let's copy the arc (yellow / layer 3) arc to a new layer (blue / layer 1)
|
7,296
|
<ASSISTANT_TASK:>
Python Code:
[1,2,3,4,5,6,7,8,9,10]
a = [1,2,3,4,5,6,7,8,9,10]
a
list(range(100))
b = list(range(100))
b
a.append('ich bin keine Zahl')
a
b.append("ich bin keine Zahl")
b
a.pop()
a
b.pop()
b
str(a)
str(b)
b = list(range(100))
b[89:]
dct_lst = [{'Stadt': 'Zürich', 'Bevölkerung': 402651, 'Kanton': 'Zürich'}, {'Stadt': 'Genf', 'Bevölkerung': 198899, 'Kanton': 'Genf'}, {'Stadt': 'Basel', 'Bevölkerung': 171000, 'Kanton': 'Basel-Stadt'}, {'Stadt': 'Lausanne', 'Bevölkerung': 137000, 'Kanton': 'Waadt'}, {'Stadt': 'Bern', 'Bevölkerung': 133000, 'Kanton': 'Bern'}]
dct_lst[1]['Bevölkerung']
dct_lst[0]['Bevölkerung'] + dct_lst[1]['Bevölkerung'] + dct_lst[2]['Bevölkerung'] + dct_lst[3]['Bevölkerung'] + dct_lst[4]['Bevölkerung']
print(dct_lst[0]['Bevölkerung'] + dct_lst[1]['Bevölkerung'] + dct_lst[2]['Bevölkerung'] + dct_lst[3]['Bevölkerung'] + dct_lst[4]['Bevölkerung'])
for dic in dct_lst:
print(dic['Stadt'] + ': ' + str(dic['Bevölkerung']))
dct_lst.extend([{'Stadt': 'Winterthur', 'Bevölkerung': 110000, 'Kanton': 'Zürich'}, {'Stadt': 'Luzern', 'Bevölkerung': 81000, 'Kanton': 'Luzern'}])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Do the same with a list of 100 elements and assign it to the variable b.
Step2: 3. Append the following string to both lists
Step3: 4. Delete this last entry from the lists again
Step4: 5. Convert every number in lists a and b from int to str
Step5: 6. From list b, show only the last ten numbers
Step6: 7. From the following list, show the second-largest value (a minimal sketch follows after this list)
Step7: 10. Show only the population of the city of Geneva
Step8: 11. Print the total population of all the cities
Step9: 12. Compute each city's share of the total Swiss population and print the result next to the city name
Step10: 13. Add the cities Winterthur and Lucerne
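A minimal sketch for exercise 7 (the second-largest value), which has no solution cell above; the list here is a made-up placeholder:
# Sort ascending and take the second entry from the end.
lst = [13, 56, 21, 99, 42, 7]
second_largest = sorted(lst)[-2]
print(second_largest)  # 56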
|
7,297
|
<ASSISTANT_TASK:>
Python Code:
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
radian = UNITS.radian
m = UNITS.meter
s = UNITS.second
kg = UNITS.kilogram
N = UNITS.newton
params = Params(radius_disk=0.5*m,
mass_disk=7*kg,
radius_pot=0.4*m,
mass_pot=0.3*kg,
force=1*N,
torque_friction=0.2*N*m,
theta_end=0.5*radian,
t_end=20*s)
def make_system(params):
"""Make a system object.
params: Params object
returns: System object"""
mass_disk, mass_pot = params.mass_disk, params.mass_pot
radius_disk, radius_pot = params.radius_disk, params.radius_pot
init = State(theta=0*radian, omega=0*radian/s)
I_disk = mass_disk * radius_disk**2 / 2
I_pot = mass_pot * radius_pot**2
return System(params, init=init, I=I_disk+I_pot)
system1 = make_system(params)
def slope_func(state, t, system):
"""Computes the derivatives of the state variables.
state: State object
t: time
system: System object
returns: sequence of derivatives"""
theta, omega = state
radius_disk, force = system.radius_disk, system.force
torque_friction, I = system.torque_friction, system.I
torque = radius_disk * force - torque_friction
alpha = torque / I
return omega, alpha
slope_func(system1.init, 0, system1)
def event_func1(state, t, system):
"""Stops when theta reaches theta_end.
state: State object
t: time
system: System object
returns: difference from target"""
theta, omega = state
return theta - system.theta_end
event_func1(system1.init, 0, system1)
results1, details1 = run_ode_solver(system1, slope_func, events=event_func1)
details1
results1.tail()
t_0 = results1.last_label() * s
init2 = results1.last_row()
system2 = System(system1, t_0=t_0, init=init2, force=0*N)
def event_func2(state, t, system):
"""Stops when omega is 0.
state: State object
t: time
system: System object
returns: omega"""
theta, omega = state
return omega
event_func2(system2.init, 0, system2)
slope_func(system2.init, system2.t_0, system2)
results2, details2 = run_ode_solver(system2, slope_func, events=event_func2)
details2
results2.tail()
results = results1.combine_first(results2)
results.tail()
def plot_theta(results):
plot(results.theta, label='theta')
decorate(xlabel='Time (s)',
ylabel='Angle (rad)')
plot_theta(results)
def plot_omega(results):
plot(results.omega, label='omega', color='C1')
decorate(xlabel='Time (s)',
ylabel='Angular velocity (rad/s)')
plot_omega(results)
subplot(2, 1, 1)
plot_theta(results)
subplot(2, 1, 2)
plot_omega(results)
savefig('figs/chap25-fig01.pdf')
def run_two_phases(force, torque_friction, params):
"""Run both phases.
force: force applied to the turntable
torque_friction: friction due to torque
params: Params object
returns: TimeFrame of simulation results"""
# put the specified parameters into the Params object
params = Params(params, force=force, torque_friction=torque_friction)
# run phase 1
system1 = make_system(params)
results1, details1 = run_ode_solver(system1, slope_func,
events=event_func1)
# get the final state from phase 1
t_0 = results1.last_label() * s
init2 = results1.last_row()
# run phase 2
system2 = System(system1, t_0=t_0, init=init2, force=0*N)
results2, details2 = run_ode_solver(system2, slope_func,
events=event_func2)
# combine and return the results
results = results1.combine_first(results2)
return TimeFrame(results)
force = 1*N
torque_friction = 0.2*N*m
results = run_two_phases(force, torque_friction, params)
results.tail()
theta_final = results.last_row().theta
def error_func1(torque_friction, params):
"""Error function for root_bisect.
torque_friction: hypothetical value
params: Params object
returns: offset from target value"""
force = 1 * N
results = run_two_phases(force, torque_friction, params)
theta_final = results.last_row().theta
print(torque_friction, theta_final)
return theta_final - 1.5 * radian
guess1 = 0.1*N*m
error_func1(guess1, params)
guess2 = 0.3*N*m
error_func1(guess2, params)
res = root_bisect(error_func1, [guess1, guess2], params)
torque_friction = res.root
force = 1 * N
results = run_two_phases(force, torque_friction, params)
theta_final = get_last_value(results.theta)
from matplotlib.patches import Circle
from matplotlib.patches import Arrow
def draw_func(state, t):
theta, omega = state
# draw a circle for the table
radius_disk = magnitude(params.radius_disk)
circle1 = Circle([0, 0], radius_disk)
plt.gca().add_patch(circle1)
# draw a circle for the teapot
radius_pot = magnitude(params.radius_pot)
center = pol2cart(theta, radius_pot)
circle2 = Circle(center, 0.05, color='C1')
plt.gca().add_patch(circle2)
# make the aspect ratio 1
plt.axis('equal')
state = results.first_row()
draw_func(state, 0)
animate(results, draw_func)
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
force = res.root
results = run_two_phases(force, torque_friction, params)
theta_final = get_last_value(results.theta)
remaining_angle = np.pi - 1.5
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Teapots and Turntables
Step2: And store the parameters in a Params object.
Step4: make_system creates the initial state, init, and computes the total moment of inertia for the turntable and the teapot.
Step5: Here's the System object we'll use for the first phase of the simulation, while I am pushing the turntable.
Step7: Simulation
Step8: As always, we'll test the slope function before running the simulation.
Step10: Here's an event function that stops the simulation when theta reaches theta_end.
Step11: Now we can run the first phase.
Step12: And look at the results.
Step13: Phase 2
Step14: And make an initial State object for Phase 2.
Step15: And a new System object with zero force.
Step17: Here's an event function that stops when angular velocity is 0.
Step18: Now we can run the second phase.
Step19: And check the results.
Step20: Pandas provides combine_first, which combines results1 and results2.
Step21: Now we can plot theta for both phases.
Step22: And omega.
Step24: Estimating friction
Step25: Let's test it with the same parameters.
Step26: And check the results.
Step28: Here's the error function we'll use with root_bisect.
Step29: Testing the error function.
Step30: And running root_bisect.
Step31: The result is the coefficient of friction that yields a total rotation of 1.5 radian.
Step32: Here's a test run with the estimated value.
Step33: Looks good.
Step34: Exercises
Step35: Test the error function with force=1
Step36: And run root_bisect to find the desired force (a sketch of this error function follows after this step list).
Step37: Exercise
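A sketch of the force exercise, modeled on error_func1 above and assuming the goal is to find the force that rotates the teapot through remaining_angle (defined in the code as pi - 1.5 rad):
def error_func2(force, params):
    """Error function for root_bisect.
    force: hypothetical value
    params: Params object
    returns: offset from the remaining angle"""
    results = run_two_phases(force, torque_friction, params)
    theta_final = results.last_row().theta
    print(force, theta_final)
    return theta_final - remaining_angle * radian

guess1 = 0.3 * N
guess2 = 3 * N
res = root_bisect(error_func2, [guess1, guess2], params)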
|
7,298
|
<ASSISTANT_TASK:>
Python Code:
df_train = pd.DataFrame()
# MNCHN
df_train['body'] = df_mnchn['body'].append(df_mnchn['Final Keywords'])
df_train['label'] = 1
# Adolescent
df_train = df_train.append(pd.DataFrame({
'body': df_adolescent['body'].append(df_adolescent['Final Keywords']),
'label': 2
}))
# Geriatrics
df_train = df_train.append(pd.DataFrame({
'body': df_geriatric['body'].append(df_geriatric['Final Keywords']),
'label': 3
}))
# Special Populations
df_train = df_train.append(pd.DataFrame({
'body': df_specpop['body'].append(df_specpop['Final Keywords']),
'label': 4
}))
df_train.reset_index(drop=True, inplace=True)
# Other Disregard atm
# df_train = df_train.append(pd.DataFrame({
# 'body': df_specpop['body'].append(df_specpop['Final Keywords']),
# 'label': 4
# set.difference(set(df.title),set(df_mnchn.AO).union(
# set(df_adolescent.AO)).union(
# set(df_geriatric.AO)).union(
# set(df_specpop.AO)))
# }))
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
tfid_params = {
'stop_words':stopwords.words(),
'ngram_range': (1,4),
'strip_accents':'ascii',
}
text_clf = Pipeline([('vect_tfid', TfidfVectorizer(**tfid_params)),
('clf', MultinomialNB()),
])
model_cycle_1 = text_clf.fit(df_train.body, df_train.label)
results = pd.DataFrame(model_cycle_1.predict_proba(df.body), columns=themes)
results['AO'] = df.title
for theme in themes:
results.sort_values(by=theme, ascending=False)[:40][['AO', theme]].to_csv(theme+'_cycle1_results.csv', index=False)
df_mnchn.drop('body', axis=1).to_csv('mnchn_cycle1_keywords.csv', index=False)
df_geriatric.drop('body', axis=1).to_csv('geriatric_cycle1_keywords.csv', index=False)
df_adolescent.drop('body', axis=1).to_csv('adolescent_cycle1_keywords.csv', index=False)
df_specpop.drop('body', axis=1).to_csv('specpop_cycle1_keywords.csv', index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classification Pipeline
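A quick usage sketch (not part of the original notebook): the fitted sklearn Pipeline can classify any new text directly; the abstract below is a made-up example:
new_docs = ["maternal and newborn nutrition program in rural health units"]
print(model_cycle_1.predict(new_docs))        # predicted theme label
print(model_cycle_1.predict_proba(new_docs))  # probability for each theme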
|
7,299
|
<ASSISTANT_TASK:>
Python Code:
from pyspark.mllib.recommendation import Rating
ratingsRDD = sc.textFile('ratings.dat') \
.map(lambda l: l.split("::")) \
.map(lambda p: Rating(
user = int(p[0]),
product = int(p[1]),
rating = float(p[2]),
)).cache()
(training, test) = ratingsRDD.randomSplit([0.8, 0.2])
numTraining = training.count()
numTest = test.count()
# verify row counts for each dataset
print("Total: {0}, Training: {1}, test: {2}".format(ratingsRDD.count(), numTraining, numTest))
from pyspark.mllib.recommendation import ALS
rank = 50
numIterations = 20
lambdaParam = 0.1
model = ALS.train(training, rank, numIterations, lambdaParam)
import numpy as np
pf = model.productFeatures().cache()
pf_keys = pf.sortByKey().keys().collect()
pf_vals = pf.sortByKey().map(lambda x: list(x[1])).collect()
Vt = np.matrix(np.asarray(pf.values().collect()))
full_u = np.zeros(len(pf_keys))
full_u.itemset(1, 5) # user has rated product_id:1 = 5
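# Fold-in for a new user: full_u*Vt projects the user's ratings onto the latent
# factors, and multiplying back by Vt.T scores every product with those factors.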
recommendations = full_u*Vt*Vt.T
print("predicted rating value", np.sort(recommendations)[:,-10:])
top_ten_recommended_product_ids = np.where(recommendations >= np.sort(recommendations)[:,-10:].min())[1]
print("predict rating prod_id", np.array_repr(top_ten_recommended_product_ids))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Split into training and testing
Step2: Build the recommendation model using ALS on the training data
Step3: Extract the product (movie) features
Step4: Simulate a new user rating a movie
|